In [ ]:
import numpy as np
import pandas as pd
import h5py
import librosa
import matplotlib.pyplot as plt
from matplotlib import image
import os
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.preprocessing import OneHotEncoder, StandardScaler, LabelBinarizer, LabelEncoder
from sklearn.metrics import accuracy_score, r2_score, confusion_matrix, precision_recall_fscore_support, precision_recall_curve
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils import shuffle
from tensorflow.keras.callbacks import EarlyStopping
from random import seed as base_set_seed
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Conv1D, MaxPooling1D, Flatten, LSTM, SimpleRNN, Reshape, Permute
from keras.applications import imagenet_utils, ResNet50
from keras.applications.imagenet_utils import preprocess_input
from keras.datasets import imdb
from keras.utils import to_categorical
from scipy.sparse import csr_matrix
In [2]:
# read in the data
f = h5py.File('bird_spectrograms.hdf5', 'r')
birds = {}
for key in f.keys():
birds[key] = f[key]
print(birds[key].shape)
print(birds.keys())
(128, 517, 66) (128, 517, 172) (128, 517, 144) (128, 517, 45) (128, 517, 125) (128, 517, 84) (128, 517, 630) (128, 517, 37) (128, 517, 187) (128, 517, 263) (128, 517, 137) (128, 517, 91) dict_keys(['amecro', 'amerob', 'bewwre', 'bkcchi', 'daejun', 'houfin', 'houspa', 'norfli', 'rewbla', 'sonspa', 'spotow', 'whcspa'])
In [76]:
# pick 10 percent from each bird for initial training of multiclass model
eda_indices = {}
eda_birds = {}
np.random.seed(112358)
# Here some of the imbalance in the species will be preserved for exploring multiclassification model sampling 5% from each bird,
# with a minimum of 10 examples for each bird
for key in birds.keys():
n_grams = birds[key].shape[2]
eda_indices[key] = np.sort(np.random.choice(np.arange(0, n_grams), max(int(0.05*n_grams), 10), replace=False))
eda_birds[key] = birds[key][:, :, eda_indices[key]]
print(eda_birds[key].shape)
(128, 517, 10) (128, 517, 10) (128, 517, 10) (128, 517, 10) (128, 517, 10) (128, 517, 10) (128, 517, 31) (128, 517, 10) (128, 517, 10) (128, 517, 13) (128, 517, 10) (128, 517, 10)
In [42]:
sr = 22050
for key in eda_birds.keys():
for i in range(5):
plt.figure(figsize=(8, 6))
librosa.display.specshow(eda_birds[key][:,:,i], x_axis='time', y_axis='mel', sr=sr, cmap='gray_r')
plt.title(f'Audio Spectrogram for {key} (Ex. {i+1})')
plt.show()
In [5]:
# plot a spectrogram from the american crow
plt.imshow(eda_birds['amecro'][:,:,0], aspect="auto", cmap="magma")
plt.colorbar(label="Amplitude")
plt.xlabel("Time")
plt.ylabel("Frequency")
plt.title(f"Amecro spectrogram example")
plt.show()
In [77]:
# combine eda dataset
eda_data_keys = np.array(list(eda_birds.keys()))
eda_data_full = np.concatenate([birds[key] for key in eda_data_keys], axis=-1)
eda_data_labels_full = np.concatenate([np.full(birds[key].shape[-1], ind) for (ind, key) in enumerate(eda_data_keys)])
eda_data = np.concatenate([eda_birds[key] for key in eda_data_keys], axis=-1)
eda_data_labels = np.concatenate([np.full(eda_birds[key].shape[-1], ind) for (ind, key) in enumerate(eda_data_keys)])
def labelsToBirds(labels):
return eda_data_keys[labels]
In [45]:
# pick a sample for training and validation for full data set
n_eda_samples = eda_data_full.shape[-1]
test_prop = 0.2
fulldata_train_idx, fulldata_test_idx = train_test_split(np.arange(0, eda_data_full.shape[-1]), test_size=test_prop, random_state=112358)
full_train_data = np.transpose(eda_data_full[:,:, fulldata_train_idx], (2, 0, 1)).reshape(-1, 128, 517, 1) # reshape and add single channel for cnn
full_train_labels = to_categorical(eda_data_labels_full[fulldata_train_idx], 12)
full_train_data, full_train_labels = shuffle(full_train_data, full_train_labels, random_state=112358)
full_test_data = np.transpose(eda_data_full[:,:, fulldata_test_idx], (2, 0, 1)).reshape(-1, 128, 517, 1)
full_test_labels = to_categorical(eda_data_labels_full[fulldata_test_idx], 12)
full_test_data, full_test_labels = shuffle(full_test_data, full_test_labels, random_state=112358)
print(full_test_labels.shape)
print(full_train_labels.shape)
print(full_test_labels[0])
# create weights for training loss relative to response class imbalance
fulldata_class_weights = compute_class_weight(class_weight="balanced",
classes=np.unique(eda_data_labels_full[fulldata_train_idx], axis=0),
y=eda_data_labels_full[fulldata_train_idx])
fulldata_class_weight_dict = {i: fulldata_class_weights[i] for i in range(len(fulldata_class_weights))}
print(fulldata_class_weight_dict)
(397, 12)
(1584, 12)
[0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
{0: np.float64(2.4), 1: np.float64(0.9428571428571428), 2: np.float64(1.1785714285714286), 3: np.float64(3.3), 4: np.float64(1.375), 5: np.float64(2.0307692307692307), 6: np.float64(0.26294820717131473), 7: np.float64(4.551724137931035), 8: np.float64(0.8859060402684564), 9: np.float64(0.616822429906542), 10: np.float64(1.2), 11: np.float64(1.8333333333333333)}
In [78]:
# pick a sample for training and validation
n_eda_samples = eda_data.shape[-1]
test_prop = 0.2
train_indices, test_indices = train_test_split(np.arange(0, eda_data.shape[-1]), test_size=test_prop, random_state=112358)
train_data = np.transpose(eda_data[:,:, train_indices], (2, 0, 1)).reshape(-1, 128, 517, 1) # reshape and add single channel for cnn
train_labels = to_categorical(eda_data_labels[train_indices], 12)
test_data = np.transpose(eda_data[:,:, test_indices], (2, 0, 1)).reshape(-1, 128, 517, 1)
test_labels = to_categorical(eda_data_labels[test_indices], 12)
print(test_labels.shape)
print(train_labels.shape)
print(test_labels[0])
# create weights for training loss relative to response class imbalance
class_weights = compute_class_weight(class_weight="balanced",
classes=np.unique(eda_data_labels[train_indices], axis=0),
y=eda_data_labels[train_indices])
class_weight_dict = {i: class_weights[i] for i in range(len(class_weights))}
print(class_weight_dict)
(29, 12)
(115, 12)
[0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]
{0: np.float64(1.0648148148148149), 1: np.float64(1.369047619047619), 2: np.float64(1.1979166666666667), 3: np.float64(1.1979166666666667), 4: np.float64(0.9583333333333334), 5: np.float64(1.369047619047619), 6: np.float64(0.45634920634920634), 7: np.float64(1.0648148148148149), 8: np.float64(1.1979166666666667), 9: np.float64(0.7986111111111112), 10: np.float64(1.1979166666666667), 11: np.float64(1.1979166666666667)}
In [8]:
# try a simple neural network with a hidden layer or two and dropout regularization
In [ ]:
# try using some convolutional and pooling layers, still treating the spectrograms like a 2d image
# initial input shape: 128 by 517
# for an audio spectrogram, we might be interested in rectangular kernels that focus on either time variation or
# frequency variation
conv_width = 3
conv_height = 3
pool_width = 2
dropout_rate = 0.3
model_cnn = Sequential(
[
Conv2D(filters=32, kernel_size=(conv_height, conv_width), padding='same', activation='relu', input_shape=(128,517, 1)),
MaxPooling2D(pool_size=(pool_width, pool_width)),
Conv2D(filters=64, kernel_size=(conv_height, conv_width), padding='same', activation='relu'),
MaxPooling2D(pool_size=(pool_width, pool_width)),
Conv2D(filters=128, kernel_size=(conv_height, conv_width), padding='same', activation='relu'),
MaxPooling2D(pool_size=(pool_width, pool_width)),
Conv2D(filters=256, kernel_size=(conv_height, conv_width), padding='same', activation='relu'),
MaxPooling2D(pool_size=(pool_width, pool_width)),
Flatten(),
Dropout(rate=dropout_rate),
Dense(units=512, activation='relu'),
Dense(units=12, activation='softmax') # 12 bird species
])
early_stopping = EarlyStopping(
monitor='val_loss',
patience=15,
restore_best_weights=True
)
print(model_cnn.summary())
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
In [ ]:
%%time
# try on full data set
batch_size = 200 # works ok with ~1600 train examples
model_cnn.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
history_cnn_fulldata = model_cnn.fit(full_train_data, full_train_labels,
epochs=75,
batch_size=batch_size,
validation_data=(full_test_data, full_test_labels),
verbose=1,
class_weight=fulldata_class_weight_dict)
#callbacks=[early_stopping])
Epoch 1/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 29s 3s/step - accuracy: 0.1857 - loss: 13.9270 - val_accuracy: 0.1008 - val_loss: 2.5425 Epoch 2/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 34s 4s/step - accuracy: 0.1408 - loss: 2.3883 - val_accuracy: 0.1990 - val_loss: 2.3737 Epoch 3/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.2626 - loss: 1.9876 - val_accuracy: 0.3577 - val_loss: 2.0984 Epoch 4/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.4409 - loss: 1.5618 - val_accuracy: 0.2771 - val_loss: 2.3033 Epoch 5/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 37s 5s/step - accuracy: 0.5470 - loss: 1.1760 - val_accuracy: 0.4005 - val_loss: 2.1168 Epoch 6/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.6237 - loss: 0.9143 - val_accuracy: 0.3552 - val_loss: 2.3220 Epoch 7/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 40s 5s/step - accuracy: 0.7064 - loss: 0.6362 - val_accuracy: 0.3627 - val_loss: 2.2622 Epoch 8/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.7773 - loss: 0.5260 - val_accuracy: 0.4282 - val_loss: 2.2247 Epoch 9/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.8552 - loss: 0.3369 - val_accuracy: 0.4081 - val_loss: 3.1048 Epoch 10/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 37s 5s/step - accuracy: 0.8739 - loss: 0.2284 - val_accuracy: 0.4383 - val_loss: 3.3123 Epoch 11/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 40s 5s/step - accuracy: 0.9196 - loss: 0.1684 - val_accuracy: 0.4332 - val_loss: 4.2457 Epoch 12/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 37s 5s/step - accuracy: 0.8231 - loss: 0.3791 - val_accuracy: 0.4635 - val_loss: 3.1397 Epoch 13/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9457 - loss: 0.1103 - val_accuracy: 0.4736 - val_loss: 3.5993 Epoch 14/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9595 - loss: 0.0814 - val_accuracy: 0.3703 - val_loss: 3.4169 Epoch 15/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9067 - loss: 0.2352 - val_accuracy: 0.5139 - val_loss: 3.7823 Epoch 16/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9660 - loss: 0.0665 - val_accuracy: 0.4710 - val_loss: 3.9261 Epoch 17/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9783 - loss: 0.0437 - val_accuracy: 0.5038 - val_loss: 4.0354 Epoch 18/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9866 - loss: 0.0375 - val_accuracy: 0.4635 - val_loss: 4.5449 Epoch 19/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9520 - loss: 0.1813 - val_accuracy: 0.4484 - val_loss: 3.8488 Epoch 20/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9728 - loss: 0.0634 - val_accuracy: 0.4786 - val_loss: 4.4557 Epoch 21/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9868 - loss: 0.0371 - val_accuracy: 0.4736 - val_loss: 4.2654 Epoch 22/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 40s 5s/step - accuracy: 0.9837 - loss: 0.0247 - val_accuracy: 0.5088 - val_loss: 4.4920 Epoch 23/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9901 - loss: 0.0175 - val_accuracy: 0.5088 - val_loss: 4.6278 Epoch 24/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9949 - loss: 0.0183 - val_accuracy: 0.4736 - val_loss: 5.3981 Epoch 25/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9765 - loss: 0.0917 - val_accuracy: 0.4736 - val_loss: 4.5577 Epoch 26/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 41s 5s/step - accuracy: 0.9796 - loss: 0.0535 - val_accuracy: 0.4811 - val_loss: 4.6575 Epoch 27/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9899 - loss: 0.0266 - val_accuracy: 0.4811 - val_loss: 5.1118 Epoch 28/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9660 - loss: 0.0818 - val_accuracy: 0.4610 - val_loss: 4.7762 Epoch 29/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9820 - loss: 0.1083 - val_accuracy: 0.5013 - val_loss: 4.7231 Epoch 30/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9950 - loss: 0.0080 - val_accuracy: 0.4761 - val_loss: 5.2342 Epoch 31/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9951 - loss: 0.0170 - val_accuracy: 0.4962 - val_loss: 5.6483 Epoch 32/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9437 - loss: 0.1678 - val_accuracy: 0.5013 - val_loss: 4.8769 Epoch 33/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 40s 5s/step - accuracy: 0.9846 - loss: 0.0329 - val_accuracy: 0.5063 - val_loss: 5.0116 Epoch 34/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9917 - loss: 0.0208 - val_accuracy: 0.4887 - val_loss: 4.9226 Epoch 35/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9959 - loss: 0.0161 - val_accuracy: 0.5164 - val_loss: 5.1189 Epoch 36/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 40s 5s/step - accuracy: 0.9957 - loss: 0.0110 - val_accuracy: 0.5214 - val_loss: 5.5120 Epoch 37/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9960 - loss: 0.0064 - val_accuracy: 0.4836 - val_loss: 5.8580 Epoch 38/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9865 - loss: 0.0359 - val_accuracy: 0.3703 - val_loss: 7.1033 Epoch 39/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.8865 - loss: 0.5772 - val_accuracy: 0.4861 - val_loss: 5.5791 Epoch 40/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9915 - loss: 0.0137 - val_accuracy: 0.4761 - val_loss: 5.4121 Epoch 41/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 41s 5s/step - accuracy: 0.9958 - loss: 0.0154 - val_accuracy: 0.5038 - val_loss: 5.2829 Epoch 42/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9946 - loss: 0.0157 - val_accuracy: 0.5063 - val_loss: 5.2631 Epoch 43/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9936 - loss: 0.0167 - val_accuracy: 0.5038 - val_loss: 4.8388 Epoch 44/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9881 - loss: 0.0330 - val_accuracy: 0.5164 - val_loss: 5.2875 Epoch 45/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9948 - loss: 0.0155 - val_accuracy: 0.4685 - val_loss: 4.8952 Epoch 46/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9776 - loss: 0.0617 - val_accuracy: 0.5139 - val_loss: 4.9534 Epoch 47/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9907 - loss: 0.0226 - val_accuracy: 0.5088 - val_loss: 5.6807 Epoch 48/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9956 - loss: 0.0084 - val_accuracy: 0.4887 - val_loss: 5.4805 Epoch 49/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9903 - loss: 0.0212 - val_accuracy: 0.5038 - val_loss: 6.5255 Epoch 50/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9898 - loss: 0.0273 - val_accuracy: 0.4912 - val_loss: 5.5960 Epoch 51/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9916 - loss: 0.0159 - val_accuracy: 0.4937 - val_loss: 5.8296 Epoch 52/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9929 - loss: 0.0247 - val_accuracy: 0.3955 - val_loss: 7.5822 Epoch 53/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9750 - loss: 0.0493 - val_accuracy: 0.5038 - val_loss: 6.1815 Epoch 54/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 40s 5s/step - accuracy: 0.9940 - loss: 0.0286 - val_accuracy: 0.4937 - val_loss: 6.1582 Epoch 55/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9956 - loss: 0.0093 - val_accuracy: 0.4987 - val_loss: 6.3134 Epoch 56/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9963 - loss: 0.0107 - val_accuracy: 0.5063 - val_loss: 5.9802 Epoch 57/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9986 - loss: 0.0050 - val_accuracy: 0.4912 - val_loss: 6.1489 Epoch 58/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9801 - loss: 0.0834 - val_accuracy: 0.4685 - val_loss: 6.2587 Epoch 59/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9905 - loss: 0.0137 - val_accuracy: 0.5164 - val_loss: 6.4145 Epoch 60/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9977 - loss: 0.0035 - val_accuracy: 0.5164 - val_loss: 6.7196 Epoch 61/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 42s 5s/step - accuracy: 0.9949 - loss: 0.0125 - val_accuracy: 0.5214 - val_loss: 6.4129 Epoch 62/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 40s 5s/step - accuracy: 0.9981 - loss: 0.0060 - val_accuracy: 0.5290 - val_loss: 6.8986 Epoch 63/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9981 - loss: 0.0042 - val_accuracy: 0.5038 - val_loss: 7.5024 Epoch 64/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9968 - loss: 0.0063 - val_accuracy: 0.5063 - val_loss: 7.3178 Epoch 65/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 41s 5s/step - accuracy: 0.9870 - loss: 0.0361 - val_accuracy: 0.5063 - val_loss: 6.5512 Epoch 66/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9494 - loss: 0.1947 - val_accuracy: 0.4962 - val_loss: 6.8330 Epoch 67/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9963 - loss: 0.0269 - val_accuracy: 0.4736 - val_loss: 6.6368 Epoch 68/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9909 - loss: 0.0142 - val_accuracy: 0.4710 - val_loss: 6.4832 Epoch 69/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 41s 5s/step - accuracy: 0.9978 - loss: 0.0070 - val_accuracy: 0.4912 - val_loss: 6.5873 Epoch 70/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9935 - loss: 0.0507 - val_accuracy: 0.4282 - val_loss: 6.2496 Epoch 71/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9918 - loss: 0.0109 - val_accuracy: 0.5013 - val_loss: 6.0225 Epoch 72/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9903 - loss: 0.0255 - val_accuracy: 0.4887 - val_loss: 6.3386 Epoch 73/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 38s 5s/step - accuracy: 0.9929 - loss: 0.0175 - val_accuracy: 0.4887 - val_loss: 5.9234 Epoch 74/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 41s 5s/step - accuracy: 0.9924 - loss: 0.0112 - val_accuracy: 0.5088 - val_loss: 6.3466 Epoch 75/75 8/8 ━━━━━━━━━━━━━━━━━━━━ 39s 5s/step - accuracy: 0.9915 - loss: 0.0220 - val_accuracy: 0.4635 - val_loss: 9.7672
--------------------------------------------------------------------------- NameError Traceback (most recent call last) File <timed exec>:18 NameError: name 'model_cnn_fulldata' is not defined
In [48]:
test_pred = model_cnn.predict(full_test_data)
y_true_classes = np.argmax(full_test_labels, axis=1)
y_pred_classes = np.argmax(test_pred, axis=1)
test_accuracy = accuracy_score(y_true_classes, y_pred_classes)
precision, recall, f1, _ = precision_recall_fscore_support(y_true_classes, y_pred_classes, average='macro')
print(f"Test Accuracy: {test_accuracy*100:.2f}%")
print(f"Average Test Precision: {precision:.3f}")
print(f"Average Test Recall: {recall:.3f}")
print(f"Average Test F1-score: {f1:.3f}")
13/13 ━━━━━━━━━━━━━━━━━━━━ 2s 141ms/step Test Accuracy: 46.35% Average Test Precision: 0.419 Average Test Recall: 0.372 Average Test F1-score: 0.362
In [50]:
plt.plot(history_cnn_fulldata.history['loss'])
plt.plot(history_cnn_fulldata.history['val_loss'])
plt.title('Loss vs Epoch for a CNN')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['loss', 'val_loss'], loc='upper right')
plt.show()
plt.plot(history_cnn_fulldata.history['accuracy'])
plt.plot(history_cnn_fulldata.history['val_accuracy'])
plt.title('Accuracy vs Epoch for a CNN')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['accuracy', 'val_accuracy'], loc='lower right')
plt.show()
In [ ]:
%%time
batch_size = 30 # works of with ~170 train examples
model_cnn.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
history_cnn = model_cnn.fit(train_data, train_labels,
epochs=50,
batch_size=batch_size,
validation_data=(test_data, test_labels),
verbose=1,
class_weight=class_weight_dict)
#callbacks=[early_stopping])
test_pred = model_cnn.predict(test_data)
y_true_classes = np.argmax(test_labels, axis=1)
y_pred_classes = np.argmax(test_pred, axis=1)
test_accuracy = accuracy_score(y_true_classes, y_pred_classes)
precision, recall, f1, _ = precision_recall_fscore_support(y_true_classes, y_pred_classes, average='macro')
print(f"Test Accuracy: {test_accuracy*100:.2f}%")
print(f"Average Test Precision: {precision:.3f}")
print(f"Average Test Recall: {recall:.3f}")
print(f"Average Test F1-score: {f1:.3f}")
Epoch 1/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 569ms/step - accuracy: 0.0780 - loss: 92.7090 - val_accuracy: 0.1163 - val_loss: 2.5369 Epoch 2/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 519ms/step - accuracy: 0.0572 - loss: 2.7760 - val_accuracy: 0.0698 - val_loss: 2.4770 Epoch 3/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 501ms/step - accuracy: 0.1414 - loss: 2.4274 - val_accuracy: 0.0465 - val_loss: 2.6607 Epoch 4/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 502ms/step - accuracy: 0.0666 - loss: 2.4814 - val_accuracy: 0.0698 - val_loss: 2.4864 Epoch 5/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 503ms/step - accuracy: 0.0726 - loss: 2.5729 - val_accuracy: 0.0465 - val_loss: 2.6081 Epoch 6/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 498ms/step - accuracy: 0.1008 - loss: 3.1258 - val_accuracy: 0.0930 - val_loss: 2.5377 Epoch 7/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 503ms/step - accuracy: 0.1072 - loss: 2.6995 - val_accuracy: 0.0930 - val_loss: 2.3928 Epoch 8/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 504ms/step - accuracy: 0.1612 - loss: 2.4057 - val_accuracy: 0.1395 - val_loss: 2.8972 Epoch 9/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 524ms/step - accuracy: 0.1544 - loss: 2.5532 - val_accuracy: 0.0465 - val_loss: 2.8260 Epoch 10/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 544ms/step - accuracy: 0.1376 - loss: 2.5372 - val_accuracy: 0.1395 - val_loss: 2.4416 Epoch 11/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 786ms/step - accuracy: 0.1759 - loss: 2.4317 - val_accuracy: 0.3023 - val_loss: 2.3853 Epoch 12/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 6s 953ms/step - accuracy: 0.3344 - loss: 2.3126 - val_accuracy: 0.1395 - val_loss: 2.9040 Epoch 13/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 768ms/step - accuracy: 0.3168 - loss: 2.1242 - val_accuracy: 0.1860 - val_loss: 2.6205 Epoch 14/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 728ms/step - accuracy: 0.2185 - loss: 4.6785 - val_accuracy: 0.0233 - val_loss: 5.0161 Epoch 15/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 700ms/step - accuracy: 0.4017 - loss: 2.2802 - val_accuracy: 0.3023 - val_loss: 2.5248 Epoch 16/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 689ms/step - accuracy: 0.5784 - loss: 1.3698 - val_accuracy: 0.1860 - val_loss: 3.7069 Epoch 17/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 783ms/step - accuracy: 0.7136 - loss: 0.8240 - val_accuracy: 0.3023 - val_loss: 3.3723 Epoch 18/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 739ms/step - accuracy: 0.8228 - loss: 0.4773 - val_accuracy: 0.2558 - val_loss: 3.6925 Epoch 19/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 756ms/step - accuracy: 0.8377 - loss: 0.5360 - val_accuracy: 0.3023 - val_loss: 2.4880 Epoch 20/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 674ms/step - accuracy: 0.8645 - loss: 0.5301 - val_accuracy: 0.2558 - val_loss: 5.5456 Epoch 21/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 675ms/step - accuracy: 0.8862 - loss: 0.3208 - val_accuracy: 0.2326 - val_loss: 7.4683 Epoch 22/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 682ms/step - accuracy: 0.8800 - loss: 0.4464 - val_accuracy: 0.2791 - val_loss: 6.5395 Epoch 23/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 705ms/step - accuracy: 0.9281 - loss: 0.4304 - val_accuracy: 0.2558 - val_loss: 7.1902 Epoch 24/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 691ms/step - accuracy: 0.6841 - loss: 1.4721 - val_accuracy: 0.2326 - val_loss: 5.6331 Epoch 25/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 700ms/step - accuracy: 0.8632 - loss: 0.4717 - val_accuracy: 0.2791 - val_loss: 5.9387 Epoch 26/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 728ms/step - accuracy: 0.9825 - loss: 0.0679 - val_accuracy: 0.2326 - val_loss: 7.8973 Epoch 27/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 742ms/step - accuracy: 0.9858 - loss: 0.0260 - val_accuracy: 0.2558 - val_loss: 8.5893 Epoch 28/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 741ms/step - accuracy: 0.9922 - loss: 0.0112 - val_accuracy: 0.2558 - val_loss: 9.3847 Epoch 29/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 763ms/step - accuracy: 0.9922 - loss: 0.0084 - val_accuracy: 0.2558 - val_loss: 9.5937 Epoch 30/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 726ms/step - accuracy: 0.9962 - loss: 0.0058 - val_accuracy: 0.3023 - val_loss: 10.2448 Epoch 31/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 758ms/step - accuracy: 0.9908 - loss: 0.0109 - val_accuracy: 0.2093 - val_loss: 14.8648 Epoch 32/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 770ms/step - accuracy: 0.8780 - loss: 1.0122 - val_accuracy: 0.2558 - val_loss: 8.8758 Epoch 33/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 757ms/step - accuracy: 0.7134 - loss: 3.4963 - val_accuracy: 0.2791 - val_loss: 5.9081 Epoch 34/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 756ms/step - accuracy: 0.9820 - loss: 0.0407 - val_accuracy: 0.3256 - val_loss: 6.8922 Epoch 35/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 747ms/step - accuracy: 0.9797 - loss: 0.0251 - val_accuracy: 0.2791 - val_loss: 7.6741 Epoch 36/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 749ms/step - accuracy: 0.9983 - loss: 0.0118 - val_accuracy: 0.2326 - val_loss: 7.0436 Epoch 37/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 795ms/step - accuracy: 0.9922 - loss: 0.0097 - val_accuracy: 0.3023 - val_loss: 8.3846 Epoch 38/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 753ms/step - accuracy: 1.0000 - loss: 0.0020 - val_accuracy: 0.3023 - val_loss: 8.7321 Epoch 39/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 782ms/step - accuracy: 1.0000 - loss: 0.0019 - val_accuracy: 0.3023 - val_loss: 9.1888 Epoch 40/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 769ms/step - accuracy: 1.0000 - loss: 0.0025 - val_accuracy: 0.3023 - val_loss: 9.8842 Epoch 41/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 739ms/step - accuracy: 1.0000 - loss: 1.9307e-04 - val_accuracy: 0.3023 - val_loss: 10.0036 Epoch 42/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 747ms/step - accuracy: 1.0000 - loss: 3.2543e-04 - val_accuracy: 0.3023 - val_loss: 10.4263 Epoch 43/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 778ms/step - accuracy: 1.0000 - loss: 3.6959e-04 - val_accuracy: 0.3023 - val_loss: 10.7751 Epoch 44/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 772ms/step - accuracy: 1.0000 - loss: 5.0325e-04 - val_accuracy: 0.2791 - val_loss: 10.6821 Epoch 45/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 747ms/step - accuracy: 1.0000 - loss: 4.3351e-04 - val_accuracy: 0.2791 - val_loss: 11.5582 Epoch 46/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 781ms/step - accuracy: 1.0000 - loss: 5.3703e-04 - val_accuracy: 0.3023 - val_loss: 12.7134 Epoch 47/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 773ms/step - accuracy: 0.8916 - loss: 0.2746 - val_accuracy: 0.1628 - val_loss: 22.0185 Epoch 48/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 735ms/step - accuracy: 0.7928 - loss: 1.3282 - val_accuracy: 0.2093 - val_loss: 9.9129 Epoch 49/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 747ms/step - accuracy: 0.9874 - loss: 0.0463 - val_accuracy: 0.2326 - val_loss: 12.3803 Epoch 50/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 747ms/step - accuracy: 1.0000 - loss: 0.0013 - val_accuracy: 0.2326 - val_loss: 12.7864 Epoch 51/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 747ms/step - accuracy: 1.0000 - loss: 9.5253e-04 - val_accuracy: 0.2326 - val_loss: 13.5181 Epoch 52/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 748ms/step - accuracy: 1.0000 - loss: 2.7622e-04 - val_accuracy: 0.2326 - val_loss: 14.2981 Epoch 53/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 719ms/step - accuracy: 1.0000 - loss: 2.1764e-04 - val_accuracy: 0.2558 - val_loss: 11.8933 Epoch 54/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 759ms/step - accuracy: 1.0000 - loss: 0.0021 - val_accuracy: 0.2093 - val_loss: 16.1805 Epoch 55/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 772ms/step - accuracy: 0.9773 - loss: 0.0352 - val_accuracy: 0.2791 - val_loss: 12.1109 Epoch 56/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 758ms/step - accuracy: 0.9962 - loss: 0.0460 - val_accuracy: 0.3256 - val_loss: 17.3451 Epoch 57/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 747ms/step - accuracy: 0.9983 - loss: 0.0308 - val_accuracy: 0.2791 - val_loss: 10.1700 Epoch 58/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 748ms/step - accuracy: 0.8669 - loss: 0.7452 - val_accuracy: 0.2558 - val_loss: 10.7633 Epoch 59/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 749ms/step - accuracy: 0.9836 - loss: 0.0434 - val_accuracy: 0.2326 - val_loss: 12.5911 Epoch 60/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 781ms/step - accuracy: 1.0000 - loss: 0.0025 - val_accuracy: 0.2558 - val_loss: 13.4472 Epoch 61/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 768ms/step - accuracy: 1.0000 - loss: 7.4470e-04 - val_accuracy: 0.2558 - val_loss: 13.8488 Epoch 62/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 739ms/step - accuracy: 1.0000 - loss: 2.0989e-04 - val_accuracy: 0.2558 - val_loss: 14.0786 Epoch 63/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 768ms/step - accuracy: 1.0000 - loss: 1.5711e-04 - val_accuracy: 0.2558 - val_loss: 14.3702 Epoch 64/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 767ms/step - accuracy: 1.0000 - loss: 9.2526e-04 - val_accuracy: 0.3256 - val_loss: 13.5232 Epoch 65/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 752ms/step - accuracy: 1.0000 - loss: 0.0012 - val_accuracy: 0.3256 - val_loss: 14.7276 Epoch 66/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 752ms/step - accuracy: 1.0000 - loss: 2.8343e-04 - val_accuracy: 0.2558 - val_loss: 15.9207 Epoch 67/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 765ms/step - accuracy: 1.0000 - loss: 3.0883e-04 - val_accuracy: 0.2558 - val_loss: 17.1738 Epoch 68/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 746ms/step - accuracy: 1.0000 - loss: 7.4278e-05 - val_accuracy: 0.2558 - val_loss: 17.4926 Epoch 69/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 740ms/step - accuracy: 1.0000 - loss: 4.3953e-05 - val_accuracy: 0.2558 - val_loss: 17.5205 Epoch 70/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 764ms/step - accuracy: 1.0000 - loss: 1.2536e-05 - val_accuracy: 0.2558 - val_loss: 17.7877 Epoch 71/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 761ms/step - accuracy: 1.0000 - loss: 1.6996e-05 - val_accuracy: 0.2558 - val_loss: 17.9955 Epoch 72/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 754ms/step - accuracy: 0.9940 - loss: 0.0111 - val_accuracy: 0.3721 - val_loss: 10.7679 Epoch 73/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 769ms/step - accuracy: 0.8987 - loss: 0.4961 - val_accuracy: 0.2093 - val_loss: 19.8969 Epoch 74/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 744ms/step - accuracy: 0.9836 - loss: 0.0201 - val_accuracy: 0.2558 - val_loss: 19.6341 Epoch 75/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 763ms/step - accuracy: 1.0000 - loss: 3.0226e-04 - val_accuracy: 0.2558 - val_loss: 20.2893 Epoch 76/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 739ms/step - accuracy: 1.0000 - loss: 0.0012 - val_accuracy: 0.2791 - val_loss: 23.0040 Epoch 77/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 762ms/step - accuracy: 1.0000 - loss: 6.1520e-04 - val_accuracy: 0.2558 - val_loss: 23.4793 Epoch 78/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 737ms/step - accuracy: 1.0000 - loss: 2.1706e-04 - val_accuracy: 0.2791 - val_loss: 24.9633 Epoch 79/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 746ms/step - accuracy: 1.0000 - loss: 7.9265e-05 - val_accuracy: 0.2791 - val_loss: 25.5148 Epoch 80/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 760ms/step - accuracy: 1.0000 - loss: 9.8205e-06 - val_accuracy: 0.2791 - val_loss: 25.6089 Epoch 81/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 763ms/step - accuracy: 1.0000 - loss: 2.7593e-05 - val_accuracy: 0.2791 - val_loss: 25.7173 Epoch 82/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 751ms/step - accuracy: 1.0000 - loss: 6.8648e-05 - val_accuracy: 0.2558 - val_loss: 25.5938 Epoch 83/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 752ms/step - accuracy: 1.0000 - loss: 3.4383e-05 - val_accuracy: 0.2558 - val_loss: 25.9342 Epoch 84/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 829ms/step - accuracy: 1.0000 - loss: 2.4153e-05 - val_accuracy: 0.2558 - val_loss: 26.1752 Epoch 85/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 763ms/step - accuracy: 0.9957 - loss: 0.0081 - val_accuracy: 0.2326 - val_loss: 22.1206 Epoch 86/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 764ms/step - accuracy: 0.9841 - loss: 0.2155 - val_accuracy: 0.2558 - val_loss: 34.0840 Epoch 87/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 757ms/step - accuracy: 0.9836 - loss: 0.1154 - val_accuracy: 0.2326 - val_loss: 24.8390 Epoch 88/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 738ms/step - accuracy: 1.0000 - loss: 1.8875e-05 - val_accuracy: 0.2326 - val_loss: 24.8183 Epoch 89/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 756ms/step - accuracy: 1.0000 - loss: 5.7326e-05 - val_accuracy: 0.2326 - val_loss: 24.9315 Epoch 90/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 747ms/step - accuracy: 1.0000 - loss: 1.6614e-05 - val_accuracy: 0.2326 - val_loss: 24.9060 Epoch 91/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 769ms/step - accuracy: 1.0000 - loss: 4.7476e-05 - val_accuracy: 0.2326 - val_loss: 25.0333 Epoch 92/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 753ms/step - accuracy: 1.0000 - loss: 5.4755e-05 - val_accuracy: 0.2326 - val_loss: 24.9022 Epoch 93/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 759ms/step - accuracy: 1.0000 - loss: 4.4196e-05 - val_accuracy: 0.2791 - val_loss: 25.3543 Epoch 94/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 815ms/step - accuracy: 1.0000 - loss: 5.1089e-05 - val_accuracy: 0.2791 - val_loss: 25.5627 Epoch 95/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 811ms/step - accuracy: 1.0000 - loss: 7.7421e-05 - val_accuracy: 0.2791 - val_loss: 27.0618 Epoch 96/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 793ms/step - accuracy: 1.0000 - loss: 3.3726e-07 - val_accuracy: 0.2791 - val_loss: 27.0702 Epoch 97/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 791ms/step - accuracy: 1.0000 - loss: 1.1676e-06 - val_accuracy: 0.2791 - val_loss: 27.0938 Epoch 98/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 784ms/step - accuracy: 1.0000 - loss: 2.3433e-04 - val_accuracy: 0.2326 - val_loss: 35.7764 Epoch 99/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 782ms/step - accuracy: 1.0000 - loss: 8.1328e-08 - val_accuracy: 0.2326 - val_loss: 35.7695 Epoch 100/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 788ms/step - accuracy: 1.0000 - loss: 4.7887e-07 - val_accuracy: 0.2326 - val_loss: 35.7693 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 215ms/step Test Accuracy: 23.26% Average Test Precision: 0.233 Average Test Recall: 0.186 Average Test F1-score: 0.199 CPU times: total: 1h 30min 22s Wall time: 7min 23s
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
In [11]:
plt.plot(history_cnn.history['loss'])
plt.plot(history_cnn.history['val_loss'])
plt.title('Loss vs Epoch for a CNN')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['loss', 'val_loss'], loc='upper right')
plt.show()
plt.plot(history_cnn.history['accuracy'])
plt.plot(history_cnn.history['val_accuracy'])
plt.title('Accuracy vs Epoch for a CNN')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['accuracy', 'val_accuracy'], loc='lower right')
plt.show()
In [12]:
print(y_true_classes)
print(y_pred_classes)
print(eda_data_keys[6])
[10 2 6 8 6 9 1 6 7 6 8 6 1 6 3 8 8 5 5 9 8 6 6 2 8 8 10 1 1 6 6 6 9 1 2 9 2 9 4 6 7 6 4] [ 2 6 5 1 1 9 3 5 7 6 9 9 9 3 8 6 8 6 6 11 6 6 3 11 9 9 1 1 6 10 9 6 3 6 6 7 8 9 11 8 6 6 4] houspa
In [28]:
# try using a recurrent neural network on the amplitude vector for each moment in time
# input shape: 128
# Define model
dropout_rate = 0.3
model_rnn = Sequential([
LSTM(64, return_sequences=True, input_shape=(517, 128)),
Dropout(dropout_rate),
LSTM(128),
Dense(512, activation="relu"),
Dropout(dropout_rate),
Dense(12, activation="softmax") # Final output (12 classes)
])
# Compile model
model_rnn.compile(
optimizer="rmsprop",
loss="categorical_crossentropy",
metrics=["accuracy"]
)
print(model_rnn.summary())
# reshape/reformat inputs
train_data = np.transpose(eda_data[:,:, train_indices], (2, 1, 0))
train_labels = to_categorical(eda_data_labels[train_indices], 12)
test_data = np.transpose(eda_data[:,:, test_indices], (2, 1, 0))
test_labels = to_categorical(eda_data_labels[test_indices], 12)
Model: "sequential_6"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ │ lstm_7 (LSTM) │ (None, 517, 64) │ 49,408 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dropout_7 (Dropout) │ (None, 517, 64) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ lstm_8 (LSTM) │ (None, 128) │ 98,816 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_12 (Dense) │ (None, 512) │ 66,048 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dropout_8 (Dropout) │ (None, 512) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_13 (Dense) │ (None, 12) │ 6,156 │ └─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 220,428 (861.05 KB)
Trainable params: 220,428 (861.05 KB)
Non-trainable params: 0 (0.00 B)
None
In [29]:
history_rnn = model_rnn.fit(train_data, train_labels, validation_data=(test_data, test_labels),
batch_size=batch_size,
epochs=100,
class_weight=class_weight_dict)
#callbacks=[early_stopping])
test_pred = model_rnn.predict(test_data)
y_true_classes = np.argmax(test_labels, axis=1)
y_pred_classes = np.argmax(test_pred, axis=1)
test_accuracy = accuracy_score(y_true_classes, y_pred_classes)
precision, recall, f1, _ = precision_recall_fscore_support(y_true_classes, y_pred_classes, average='macro')
print(f"Test Accuracy: {test_accuracy*100:.2f}%")
print(f"Average Test Precision: {precision:.3f}")
print(f"Average Test Recall: {recall:.3f}")
print(f"Average Test F1-score: {f1:.3f}")
Epoch 1/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 301ms/step - accuracy: 0.0382 - loss: 2.5099 - val_accuracy: 0.1163 - val_loss: 2.4965 Epoch 2/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 236ms/step - accuracy: 0.0622 - loss: 2.5183 - val_accuracy: 0.0465 - val_loss: 2.4849 Epoch 3/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 245ms/step - accuracy: 0.1187 - loss: 2.3908 - val_accuracy: 0.0233 - val_loss: 2.5162 Epoch 4/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 272ms/step - accuracy: 0.1161 - loss: 2.5134 - val_accuracy: 0.0465 - val_loss: 2.4154 Epoch 5/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 273ms/step - accuracy: 0.0868 - loss: 2.4987 - val_accuracy: 0.0465 - val_loss: 2.4534 Epoch 6/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 262ms/step - accuracy: 0.1255 - loss: 2.5056 - val_accuracy: 0.0698 - val_loss: 2.4753 Epoch 7/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 260ms/step - accuracy: 0.1206 - loss: 2.4498 - val_accuracy: 0.1163 - val_loss: 2.4244 Epoch 8/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 270ms/step - accuracy: 0.0682 - loss: 2.4863 - val_accuracy: 0.0930 - val_loss: 2.4663 Epoch 9/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 273ms/step - accuracy: 0.1436 - loss: 2.3775 - val_accuracy: 0.0465 - val_loss: 2.5300 Epoch 10/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 274ms/step - accuracy: 0.0572 - loss: 2.4214 - val_accuracy: 0.0698 - val_loss: 2.4325 Epoch 11/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 275ms/step - accuracy: 0.1624 - loss: 2.4279 - val_accuracy: 0.0465 - val_loss: 2.7341 Epoch 12/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 291ms/step - accuracy: 0.1172 - loss: 2.5055 - val_accuracy: 0.0930 - val_loss: 2.4316 Epoch 13/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 283ms/step - accuracy: 0.2315 - loss: 2.2726 - val_accuracy: 0.0465 - val_loss: 2.6668 Epoch 14/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 280ms/step - accuracy: 0.1288 - loss: 2.3566 - val_accuracy: 0.2326 - val_loss: 2.3585 Epoch 15/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 269ms/step - accuracy: 0.1605 - loss: 2.3424 - val_accuracy: 0.1163 - val_loss: 2.5614 Epoch 16/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 343ms/step - accuracy: 0.1999 - loss: 2.2403 - val_accuracy: 0.0930 - val_loss: 2.6481 Epoch 17/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 330ms/step - accuracy: 0.1552 - loss: 2.3758 - val_accuracy: 0.0465 - val_loss: 2.5428 Epoch 18/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 313ms/step - accuracy: 0.1965 - loss: 2.2837 - val_accuracy: 0.1163 - val_loss: 2.5391 Epoch 19/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 337ms/step - accuracy: 0.1675 - loss: 2.2203 - val_accuracy: 0.0930 - val_loss: 2.6011 Epoch 20/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 332ms/step - accuracy: 0.1319 - loss: 2.4118 - val_accuracy: 0.1163 - val_loss: 2.5690 Epoch 21/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 327ms/step - accuracy: 0.2184 - loss: 2.2242 - val_accuracy: 0.0465 - val_loss: 2.8293 Epoch 22/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 349ms/step - accuracy: 0.1577 - loss: 2.2864 - val_accuracy: 0.1628 - val_loss: 2.5158 Epoch 23/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 340ms/step - accuracy: 0.1788 - loss: 2.3375 - val_accuracy: 0.0930 - val_loss: 2.5995 Epoch 24/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 370ms/step - accuracy: 0.2113 - loss: 2.2458 - val_accuracy: 0.1163 - val_loss: 2.5716 Epoch 25/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 414ms/step - accuracy: 0.1931 - loss: 2.2180 - val_accuracy: 0.1860 - val_loss: 2.3937 Epoch 26/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 335ms/step - accuracy: 0.2462 - loss: 2.3088 - val_accuracy: 0.0930 - val_loss: 2.4843 Epoch 27/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 289ms/step - accuracy: 0.1628 - loss: 2.4313 - val_accuracy: 0.0465 - val_loss: 2.6610 Epoch 28/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 302ms/step - accuracy: 0.1286 - loss: 2.4079 - val_accuracy: 0.1860 - val_loss: 2.5347 Epoch 29/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 315ms/step - accuracy: 0.2186 - loss: 2.1922 - val_accuracy: 0.0698 - val_loss: 2.5936 Epoch 30/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 297ms/step - accuracy: 0.1852 - loss: 2.2531 - val_accuracy: 0.0465 - val_loss: 2.7920 Epoch 31/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 337ms/step - accuracy: 0.1017 - loss: 2.3015 - val_accuracy: 0.1628 - val_loss: 2.5202 Epoch 32/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 373ms/step - accuracy: 0.2751 - loss: 2.2133 - val_accuracy: 0.2093 - val_loss: 2.4259 Epoch 33/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 383ms/step - accuracy: 0.2514 - loss: 2.1833 - val_accuracy: 0.0698 - val_loss: 2.7630 Epoch 34/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 362ms/step - accuracy: 0.2242 - loss: 2.2573 - val_accuracy: 0.1628 - val_loss: 2.3628 Epoch 35/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 326ms/step - accuracy: 0.2079 - loss: 2.2565 - val_accuracy: 0.1163 - val_loss: 2.6800 Epoch 36/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 323ms/step - accuracy: 0.2004 - loss: 2.3303 - val_accuracy: 0.1860 - val_loss: 2.4773 Epoch 37/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 327ms/step - accuracy: 0.2084 - loss: 2.1680 - val_accuracy: 0.2093 - val_loss: 2.5183 Epoch 38/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 318ms/step - accuracy: 0.2856 - loss: 1.9982 - val_accuracy: 0.1628 - val_loss: 2.4877 Epoch 39/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 323ms/step - accuracy: 0.2597 - loss: 2.1898 - val_accuracy: 0.1395 - val_loss: 2.4946 Epoch 40/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 330ms/step - accuracy: 0.1787 - loss: 2.2008 - val_accuracy: 0.0698 - val_loss: 2.7530 Epoch 41/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 334ms/step - accuracy: 0.2364 - loss: 2.1548 - val_accuracy: 0.0698 - val_loss: 2.7832 Epoch 42/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 369ms/step - accuracy: 0.2318 - loss: 2.3890 - val_accuracy: 0.1860 - val_loss: 2.4477 Epoch 43/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 320ms/step - accuracy: 0.3128 - loss: 2.0999 - val_accuracy: 0.2558 - val_loss: 2.4140 Epoch 44/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 294ms/step - accuracy: 0.2460 - loss: 2.2195 - val_accuracy: 0.2326 - val_loss: 2.4805 Epoch 45/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 295ms/step - accuracy: 0.2334 - loss: 2.1015 - val_accuracy: 0.1395 - val_loss: 2.4925 Epoch 46/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 308ms/step - accuracy: 0.2322 - loss: 2.1012 - val_accuracy: 0.2326 - val_loss: 2.4245 Epoch 47/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 295ms/step - accuracy: 0.2818 - loss: 2.1302 - val_accuracy: 0.1628 - val_loss: 2.5815 Epoch 48/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 301ms/step - accuracy: 0.2933 - loss: 2.1492 - val_accuracy: 0.1628 - val_loss: 2.8144 Epoch 49/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 314ms/step - accuracy: 0.2846 - loss: 1.9832 - val_accuracy: 0.1395 - val_loss: 2.6235 Epoch 50/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 331ms/step - accuracy: 0.1944 - loss: 1.9685 - val_accuracy: 0.1628 - val_loss: 2.4287 Epoch 51/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 346ms/step - accuracy: 0.2988 - loss: 2.1455 - val_accuracy: 0.2093 - val_loss: 2.3293 Epoch 52/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 305ms/step - accuracy: 0.3371 - loss: 1.8534 - val_accuracy: 0.0930 - val_loss: 2.7673 Epoch 53/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 378ms/step - accuracy: 0.2923 - loss: 1.9358 - val_accuracy: 0.1395 - val_loss: 2.5280 Epoch 54/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 407ms/step - accuracy: 0.2679 - loss: 2.0890 - val_accuracy: 0.2326 - val_loss: 2.4915 Epoch 55/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 340ms/step - accuracy: 0.2608 - loss: 2.1140 - val_accuracy: 0.1163 - val_loss: 2.8054 Epoch 56/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 324ms/step - accuracy: 0.1983 - loss: 1.9707 - val_accuracy: 0.1628 - val_loss: 2.4758 Epoch 57/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 363ms/step - accuracy: 0.3305 - loss: 1.9515 - val_accuracy: 0.1860 - val_loss: 2.5922 Epoch 58/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 341ms/step - accuracy: 0.4039 - loss: 1.8807 - val_accuracy: 0.1860 - val_loss: 2.6129 Epoch 59/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 342ms/step - accuracy: 0.2465 - loss: 1.9345 - val_accuracy: 0.1163 - val_loss: 2.8994 Epoch 60/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 310ms/step - accuracy: 0.2306 - loss: 1.9927 - val_accuracy: 0.0465 - val_loss: 2.9113 Epoch 61/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 301ms/step - accuracy: 0.2623 - loss: 2.0936 - val_accuracy: 0.2326 - val_loss: 2.4417 Epoch 62/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 305ms/step - accuracy: 0.3116 - loss: 2.0249 - val_accuracy: 0.1395 - val_loss: 2.8210 Epoch 63/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 291ms/step - accuracy: 0.3620 - loss: 1.7752 - val_accuracy: 0.0698 - val_loss: 2.6806 Epoch 64/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 296ms/step - accuracy: 0.2942 - loss: 1.8975 - val_accuracy: 0.2326 - val_loss: 2.2703 Epoch 65/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 299ms/step - accuracy: 0.3029 - loss: 2.0719 - val_accuracy: 0.1628 - val_loss: 2.4931 Epoch 66/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 291ms/step - accuracy: 0.3033 - loss: 1.9139 - val_accuracy: 0.1395 - val_loss: 2.4696 Epoch 67/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 293ms/step - accuracy: 0.2855 - loss: 1.8469 - val_accuracy: 0.0698 - val_loss: 2.8588 Epoch 68/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 290ms/step - accuracy: 0.2467 - loss: 1.9048 - val_accuracy: 0.2558 - val_loss: 2.2975 Epoch 69/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 310ms/step - accuracy: 0.2763 - loss: 1.8571 - val_accuracy: 0.1395 - val_loss: 2.5585 Epoch 70/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 338ms/step - accuracy: 0.3210 - loss: 1.8913 - val_accuracy: 0.1395 - val_loss: 2.5075 Epoch 71/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 344ms/step - accuracy: 0.3926 - loss: 1.8480 - val_accuracy: 0.2791 - val_loss: 2.3717 Epoch 72/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 357ms/step - accuracy: 0.3494 - loss: 1.8230 - val_accuracy: 0.1163 - val_loss: 2.9170 Epoch 73/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 386ms/step - accuracy: 0.3463 - loss: 1.8701 - val_accuracy: 0.2093 - val_loss: 2.6571 Epoch 74/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 388ms/step - accuracy: 0.3869 - loss: 1.6693 - val_accuracy: 0.1860 - val_loss: 2.5688 Epoch 75/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 348ms/step - accuracy: 0.2291 - loss: 2.1340 - val_accuracy: 0.1163 - val_loss: 2.8852 Epoch 76/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 370ms/step - accuracy: 0.3696 - loss: 1.7591 - val_accuracy: 0.1860 - val_loss: 2.5157 Epoch 77/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 358ms/step - accuracy: 0.3348 - loss: 1.8275 - val_accuracy: 0.1860 - val_loss: 2.5570 Epoch 78/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 343ms/step - accuracy: 0.3396 - loss: 1.7463 - val_accuracy: 0.0698 - val_loss: 2.8595 Epoch 79/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 350ms/step - accuracy: 0.2769 - loss: 1.9938 - val_accuracy: 0.1395 - val_loss: 3.0407 Epoch 80/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 330ms/step - accuracy: 0.3791 - loss: 1.8286 - val_accuracy: 0.2093 - val_loss: 2.5807 Epoch 81/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 294ms/step - accuracy: 0.4457 - loss: 1.6127 - val_accuracy: 0.2093 - val_loss: 2.5583 Epoch 82/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 320ms/step - accuracy: 0.3796 - loss: 1.6692 - val_accuracy: 0.2558 - val_loss: 2.5516 Epoch 83/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 317ms/step - accuracy: 0.4313 - loss: 1.5266 - val_accuracy: 0.2093 - val_loss: 2.6331 Epoch 84/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 355ms/step - accuracy: 0.3918 - loss: 1.7464 - val_accuracy: 0.2558 - val_loss: 2.4453 Epoch 85/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 336ms/step - accuracy: 0.3936 - loss: 1.6295 - val_accuracy: 0.1395 - val_loss: 2.7494 Epoch 86/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 358ms/step - accuracy: 0.4305 - loss: 1.7084 - val_accuracy: 0.1628 - val_loss: 2.5005 Epoch 87/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 372ms/step - accuracy: 0.3431 - loss: 1.9161 - val_accuracy: 0.1628 - val_loss: 2.7776 Epoch 88/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 303ms/step - accuracy: 0.3296 - loss: 1.6762 - val_accuracy: 0.2558 - val_loss: 2.3369 Epoch 89/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 292ms/step - accuracy: 0.4016 - loss: 1.6133 - val_accuracy: 0.1395 - val_loss: 2.7736 Epoch 90/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 291ms/step - accuracy: 0.4356 - loss: 1.6163 - val_accuracy: 0.2558 - val_loss: 2.5090 Epoch 91/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 290ms/step - accuracy: 0.3942 - loss: 1.5989 - val_accuracy: 0.1860 - val_loss: 2.6787 Epoch 92/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 304ms/step - accuracy: 0.3980 - loss: 1.6308 - val_accuracy: 0.1163 - val_loss: 2.9395 Epoch 93/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 301ms/step - accuracy: 0.4021 - loss: 1.5637 - val_accuracy: 0.1860 - val_loss: 2.8319 Epoch 94/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 300ms/step - accuracy: 0.3437 - loss: 1.8115 - val_accuracy: 0.2093 - val_loss: 2.3800 Epoch 95/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 288ms/step - accuracy: 0.4526 - loss: 1.5197 - val_accuracy: 0.2326 - val_loss: 2.7008 Epoch 96/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 282ms/step - accuracy: 0.3650 - loss: 1.5369 - val_accuracy: 0.1860 - val_loss: 2.6473 Epoch 97/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 280ms/step - accuracy: 0.3013 - loss: 1.5872 - val_accuracy: 0.1860 - val_loss: 2.7200 Epoch 98/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 287ms/step - accuracy: 0.4189 - loss: 1.4612 - val_accuracy: 0.2558 - val_loss: 2.7339 Epoch 99/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 293ms/step - accuracy: 0.4669 - loss: 1.2654 - val_accuracy: 0.1163 - val_loss: 2.7456 Epoch 100/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 288ms/step - accuracy: 0.4657 - loss: 1.3557 - val_accuracy: 0.1860 - val_loss: 3.0515 2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 221ms/step Test Accuracy: 18.60% Average Test Precision: 0.156 Average Test Recall: 0.122 Average Test F1-score: 0.116
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
In [30]:
plt.plot(history_rnn.history['loss'])
plt.plot(history_rnn.history['val_loss'])
plt.title('Loss vs Epoch for a RNN')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['loss', 'val_loss'], loc='upper right')
plt.show()
plt.plot(history_rnn.history['accuracy'])
plt.plot(history_rnn.history['val_accuracy'])
plt.title('Accuracy vs Epoch for a RNN')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['accuracy', 'val_accuracy'], loc='lower right')
plt.show()
In [16]:
y_pred_classes
Out[16]:
array([10, 8, 1, 3, 3, 2, 3, 8, 3, 3, 5, 9, 9, 9, 1, 10, 5,
3, 6, 3, 2, 3, 9, 2, 2, 9, 3, 6, 10, 3, 3, 6, 10, 3,
8, 9, 9, 3, 2, 2, 6, 2, 10])
In [19]:
# try adding a 1d convolutional filter to the RNN beforehand
# Define model without pooling first
dropout_rate = 0.5
model_conv_rnn = Sequential([
Conv1D(filters=64, kernel_size=3, input_shape=(517,128)),
LSTM(64, return_sequences=True),
Dropout(dropout_rate),
LSTM(128),
Dense(512, activation="relu"),
Dense(12, activation="softmax") # Final output (12 classes)
])
# Compile model
model_conv_rnn.compile(
optimizer="rmsprop",
loss="categorical_crossentropy",
metrics=["accuracy"]
)
print(model_conv_rnn.summary())
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Model: "sequential_2"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ │ conv1d (Conv1D) │ (None, 515, 64) │ 24,640 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ lstm_2 (LSTM) │ (None, 515, 64) │ 33,024 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dropout_2 (Dropout) │ (None, 515, 64) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ lstm_3 (LSTM) │ (None, 128) │ 98,816 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_4 (Dense) │ (None, 512) │ 66,048 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_5 (Dense) │ (None, 12) │ 6,156 │ └─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 228,684 (893.30 KB)
Trainable params: 228,684 (893.30 KB)
Non-trainable params: 0 (0.00 B)
None
In [ ]:
history_conv_rnn = model_conv_rnn.fit(train_data, train_labels, validation_data=(test_data, test_labels),
batch_size=batch_size,
epochs=100,
class_weight=class_weight_dict)
#callbacks=[early_stopping])
test_pred = model_conv_rnn.predict(test_data)
y_true_classes = np.where(test_labels, axis=1)
y_pred_classes = np.argmax(test_pred, axis=1)
test_accuracy = accuracy_score(y_true_classes, y_pred_classes)
precision, recall, f1, _ = precision_recall_fscore_support(y_true_classes, y_pred_classes, average='macro')
print(f"Test Accuracy: {test_accuracy*100:.2f}%")
print(f"Average Test Precision: {precision:.3f}")
print(f"Average Test Recall: {recall:.3f}")
print(f"Average Test F1-score: {f1:.3f}")
Epoch 1/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 293ms/step - accuracy: 0.0773 - loss: 2.4930 - val_accuracy: 0.0465 - val_loss: 2.4753 Epoch 2/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 232ms/step - accuracy: 0.0424 - loss: 2.4990 - val_accuracy: 0.1163 - val_loss: 2.4744 Epoch 3/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 246ms/step - accuracy: 0.0573 - loss: 2.5670 - val_accuracy: 0.0930 - val_loss: 2.4587 Epoch 4/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 240ms/step - accuracy: 0.0473 - loss: 2.4481 - val_accuracy: 0.1628 - val_loss: 2.4877 Epoch 5/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 248ms/step - accuracy: 0.0587 - loss: 2.5303 - val_accuracy: 0.0698 - val_loss: 2.4599 Epoch 6/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 256ms/step - accuracy: 0.0484 - loss: 2.4697 - val_accuracy: 0.0465 - val_loss: 2.5085 Epoch 7/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 260ms/step - accuracy: 0.0428 - loss: 2.4395 - val_accuracy: 0.0465 - val_loss: 2.4894 Epoch 8/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 274ms/step - accuracy: 0.1157 - loss: 2.3349 - val_accuracy: 0.0000e+00 - val_loss: 2.5089 Epoch 9/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 268ms/step - accuracy: 0.0399 - loss: 2.5388 - val_accuracy: 0.1163 - val_loss: 2.4811 Epoch 10/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 269ms/step - accuracy: 0.0817 - loss: 2.4469 - val_accuracy: 0.1163 - val_loss: 2.4804 Epoch 11/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 259ms/step - accuracy: 0.0525 - loss: 2.5627 - val_accuracy: 0.0930 - val_loss: 2.4781 Epoch 12/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 266ms/step - accuracy: 0.0570 - loss: 2.5602 - val_accuracy: 0.0465 - val_loss: 2.4819 Epoch 13/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 289ms/step - accuracy: 0.0273 - loss: 2.4125 - val_accuracy: 0.0465 - val_loss: 2.4934 Epoch 14/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 284ms/step - accuracy: 0.0446 - loss: 2.4134 - val_accuracy: 0.1163 - val_loss: 2.5172 Epoch 15/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 291ms/step - accuracy: 0.0634 - loss: 2.5122 - val_accuracy: 0.0465 - val_loss: 2.4929 Epoch 16/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 269ms/step - accuracy: 0.0329 - loss: 2.6276 - val_accuracy: 0.0465 - val_loss: 2.4786 Epoch 17/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 356ms/step - accuracy: 0.0632 - loss: 2.5879 - val_accuracy: 0.0465 - val_loss: 2.4754 Epoch 18/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 300ms/step - accuracy: 0.1078 - loss: 2.4689 - val_accuracy: 0.1628 - val_loss: 2.4692 Epoch 19/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 306ms/step - accuracy: 0.0717 - loss: 2.5274 - val_accuracy: 0.0000e+00 - val_loss: 2.4761 Epoch 20/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 336ms/step - accuracy: 0.0689 - loss: 2.6276 - val_accuracy: 0.3023 - val_loss: 2.4651 Epoch 21/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 331ms/step - accuracy: 0.0579 - loss: 2.4909 - val_accuracy: 0.0930 - val_loss: 2.4754 Epoch 22/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 316ms/step - accuracy: 0.0764 - loss: 2.5963 - val_accuracy: 0.0930 - val_loss: 2.4646 Epoch 23/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 325ms/step - accuracy: 0.0963 - loss: 2.5279 - val_accuracy: 0.1163 - val_loss: 2.4721 Epoch 24/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 334ms/step - accuracy: 0.0498 - loss: 2.4627 - val_accuracy: 0.1163 - val_loss: 2.4807 Epoch 25/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 360ms/step - accuracy: 0.0924 - loss: 2.5311 - val_accuracy: 0.1163 - val_loss: 2.4837 Epoch 26/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 345ms/step - accuracy: 0.0491 - loss: 2.5256 - val_accuracy: 0.1163 - val_loss: 2.4778 Epoch 27/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 326ms/step - accuracy: 0.0615 - loss: 2.5077 - val_accuracy: 0.1163 - val_loss: 2.4828 Epoch 28/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 319ms/step - accuracy: 0.0535 - loss: 2.5311 - val_accuracy: 0.1163 - val_loss: 2.4846 Epoch 29/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 327ms/step - accuracy: 0.1097 - loss: 2.5042 - val_accuracy: 0.0465 - val_loss: 2.4842 Epoch 30/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 317ms/step - accuracy: 0.0776 - loss: 2.4440 - val_accuracy: 0.0000e+00 - val_loss: 2.4844 Epoch 31/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 317ms/step - accuracy: 0.0654 - loss: 2.4493 - val_accuracy: 0.3023 - val_loss: 2.4810 Epoch 32/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 341ms/step - accuracy: 0.0677 - loss: 2.4727 - val_accuracy: 0.0000e+00 - val_loss: 2.4855 Epoch 33/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 340ms/step - accuracy: 0.0674 - loss: 2.6087 - val_accuracy: 0.3023 - val_loss: 2.4784 Epoch 34/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 327ms/step - accuracy: 0.0830 - loss: 2.5143 - val_accuracy: 0.0465 - val_loss: 2.4810 Epoch 35/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 327ms/step - accuracy: 0.1159 - loss: 2.4994 - val_accuracy: 0.0465 - val_loss: 2.4829 Epoch 36/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 323ms/step - accuracy: 0.0597 - loss: 2.5207 - val_accuracy: 0.0465 - val_loss: 2.4765 Epoch 37/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 318ms/step - accuracy: 0.0743 - loss: 2.4046 - val_accuracy: 0.0000e+00 - val_loss: 2.4824 Epoch 38/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 328ms/step - accuracy: 0.0863 - loss: 2.4676 - val_accuracy: 0.0465 - val_loss: 2.4820 Epoch 39/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 332ms/step - accuracy: 0.0554 - loss: 2.4451 - val_accuracy: 0.0465 - val_loss: 2.4819 Epoch 40/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 325ms/step - accuracy: 0.0581 - loss: 2.5951 - val_accuracy: 0.3023 - val_loss: 2.4807 Epoch 41/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 324ms/step - accuracy: 0.0571 - loss: 2.4475 - val_accuracy: 0.0465 - val_loss: 2.4854 Epoch 42/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 326ms/step - accuracy: 0.0639 - loss: 2.4495 - val_accuracy: 0.0465 - val_loss: 2.4865 Epoch 43/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 326ms/step - accuracy: 0.0296 - loss: 2.3673 - val_accuracy: 0.0000e+00 - val_loss: 2.4848 Epoch 44/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 323ms/step - accuracy: 0.0474 - loss: 2.4766 - val_accuracy: 0.0000e+00 - val_loss: 2.4844 Epoch 45/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 331ms/step - accuracy: 0.0713 - loss: 2.5960 - val_accuracy: 0.0930 - val_loss: 2.4802 Epoch 46/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 323ms/step - accuracy: 0.0659 - loss: 2.4898 - val_accuracy: 0.0465 - val_loss: 2.4811 Epoch 47/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 320ms/step - accuracy: 0.0552 - loss: 2.4254 - val_accuracy: 0.0465 - val_loss: 2.4810 Epoch 48/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 323ms/step - accuracy: 0.0646 - loss: 2.5581 - val_accuracy: 0.0465 - val_loss: 2.4870 Epoch 49/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 327ms/step - accuracy: 0.0887 - loss: 2.5007 - val_accuracy: 0.0000e+00 - val_loss: 2.4832 Epoch 50/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 330ms/step - accuracy: 0.0348 - loss: 2.5460 - val_accuracy: 0.0000e+00 - val_loss: 2.4822 Epoch 51/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 286ms/step - accuracy: 0.0684 - loss: 2.5058 - val_accuracy: 0.0465 - val_loss: 2.4810 Epoch 52/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 316ms/step - accuracy: 0.0599 - loss: 2.4830 - val_accuracy: 0.0000e+00 - val_loss: 2.4862 Epoch 53/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 320ms/step - accuracy: 0.0453 - loss: 2.4165 - val_accuracy: 0.0465 - val_loss: 2.4819 Epoch 54/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 320ms/step - accuracy: 0.0511 - loss: 2.5815 - val_accuracy: 0.0465 - val_loss: 2.4848 Epoch 55/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 320ms/step - accuracy: 0.0797 - loss: 2.4882 - val_accuracy: 0.0465 - val_loss: 2.4804 Epoch 56/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 320ms/step - accuracy: 0.0815 - loss: 2.3931 - val_accuracy: 0.3023 - val_loss: 2.4792 Epoch 57/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 336ms/step - accuracy: 0.0722 - loss: 2.4219 - val_accuracy: 0.0233 - val_loss: 2.4820 Epoch 58/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 316ms/step - accuracy: 0.0586 - loss: 2.5222 - val_accuracy: 0.0465 - val_loss: 2.4813 Epoch 59/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 317ms/step - accuracy: 0.1006 - loss: 2.4008 - val_accuracy: 0.0465 - val_loss: 2.4816 Epoch 60/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 330ms/step - accuracy: 0.0884 - loss: 2.6213 - val_accuracy: 0.1628 - val_loss: 2.4822 Epoch 61/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 322ms/step - accuracy: 0.0497 - loss: 2.6106 - val_accuracy: 0.0465 - val_loss: 2.4846 Epoch 62/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 324ms/step - accuracy: 0.0792 - loss: 2.4823 - val_accuracy: 0.0465 - val_loss: 2.4839 Epoch 63/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 320ms/step - accuracy: 0.0654 - loss: 2.4115 - val_accuracy: 0.0465 - val_loss: 2.4823 Epoch 64/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 330ms/step - accuracy: 0.1221 - loss: 2.5464 - val_accuracy: 0.0465 - val_loss: 2.4815 Epoch 65/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 293ms/step - accuracy: 0.1644 - loss: 2.5043 - val_accuracy: 0.3023 - val_loss: 2.4854 Epoch 66/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 324ms/step - accuracy: 0.1631 - loss: 2.5121 - val_accuracy: 0.0233 - val_loss: 2.4838 Epoch 67/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 329ms/step - accuracy: 0.1278 - loss: 2.5387 - val_accuracy: 0.3023 - val_loss: 2.4830 Epoch 68/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 322ms/step - accuracy: 0.1248 - loss: 2.4172 - val_accuracy: 0.0465 - val_loss: 2.4873 Epoch 69/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 315ms/step - accuracy: 0.0881 - loss: 2.4394 - val_accuracy: 0.0465 - val_loss: 2.4868 Epoch 70/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 325ms/step - accuracy: 0.0977 - loss: 2.3908 - val_accuracy: 0.0465 - val_loss: 2.4842 Epoch 71/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 334ms/step - accuracy: 0.0721 - loss: 2.4261 - val_accuracy: 0.0465 - val_loss: 2.4856 Epoch 72/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 320ms/step - accuracy: 0.0571 - loss: 2.5577 - val_accuracy: 0.0465 - val_loss: 2.4854 Epoch 73/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 320ms/step - accuracy: 0.0500 - loss: 2.5106 - val_accuracy: 0.0465 - val_loss: 2.4844 Epoch 74/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 327ms/step - accuracy: 0.1123 - loss: 2.6126 - val_accuracy: 0.0465 - val_loss: 2.4798 Epoch 75/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 317ms/step - accuracy: 0.1317 - loss: 2.4100 - val_accuracy: 0.1163 - val_loss: 2.4805 Epoch 76/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 323ms/step - accuracy: 0.0916 - loss: 2.4437 - val_accuracy: 0.0465 - val_loss: 2.4824 Epoch 77/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 321ms/step - accuracy: 0.0772 - loss: 2.5086 - val_accuracy: 0.0465 - val_loss: 2.4811 Epoch 78/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 323ms/step - accuracy: 0.1085 - loss: 2.4405 - val_accuracy: 0.0465 - val_loss: 2.4819 Epoch 79/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 329ms/step - accuracy: 0.0593 - loss: 2.5594 - val_accuracy: 0.1163 - val_loss: 2.4812 Epoch 80/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 320ms/step - accuracy: 0.1076 - loss: 2.4529 - val_accuracy: 0.0465 - val_loss: 2.4871 Epoch 81/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 325ms/step - accuracy: 0.1084 - loss: 2.5264 - val_accuracy: 0.0465 - val_loss: 2.4856 Epoch 82/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 325ms/step - accuracy: 0.0447 - loss: 2.3489 - val_accuracy: 0.0465 - val_loss: 2.4830 Epoch 83/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 323ms/step - accuracy: 0.1151 - loss: 2.4482 - val_accuracy: 0.0465 - val_loss: 2.4825 Epoch 84/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 332ms/step - accuracy: 0.1087 - loss: 2.5465 - val_accuracy: 0.0465 - val_loss: 2.4790 Epoch 85/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 322ms/step - accuracy: 0.1001 - loss: 2.4042 - val_accuracy: 0.1163 - val_loss: 2.4821 Epoch 86/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 324ms/step - accuracy: 0.0607 - loss: 2.5136 - val_accuracy: 0.0930 - val_loss: 2.4825 Epoch 87/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 324ms/step - accuracy: 0.0494 - loss: 2.4349 - val_accuracy: 0.0930 - val_loss: 2.4800 Epoch 88/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 327ms/step - accuracy: 0.1128 - loss: 2.4188 - val_accuracy: 0.1163 - val_loss: 2.4783 Epoch 89/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 319ms/step - accuracy: 0.0502 - loss: 2.5515 - val_accuracy: 0.1163 - val_loss: 2.4778 Epoch 90/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 321ms/step - accuracy: 0.0880 - loss: 2.4616 - val_accuracy: 0.1163 - val_loss: 2.4809 Epoch 91/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 317ms/step - accuracy: 0.0532 - loss: 2.3696 - val_accuracy: 0.1163 - val_loss: 2.4787 Epoch 92/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 321ms/step - accuracy: 0.0595 - loss: 2.4913 - val_accuracy: 0.1163 - val_loss: 2.4794 Epoch 93/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 333ms/step - accuracy: 0.0862 - loss: 2.4433 - val_accuracy: 0.0465 - val_loss: 2.4826 Epoch 94/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 317ms/step - accuracy: 0.0914 - loss: 2.5161 - val_accuracy: 0.1163 - val_loss: 2.4828 Epoch 95/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 357ms/step - accuracy: 0.0526 - loss: 2.4410 - val_accuracy: 0.0000e+00 - val_loss: 2.4840 Epoch 96/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 371ms/step - accuracy: 0.0771 - loss: 2.3987 - val_accuracy: 0.1628 - val_loss: 2.4838 Epoch 97/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 333ms/step - accuracy: 0.0742 - loss: 2.5625 - val_accuracy: 0.1628 - val_loss: 2.4841 Epoch 98/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 328ms/step - accuracy: 0.0773 - loss: 2.6579 - val_accuracy: 0.1628 - val_loss: 2.4842 Epoch 99/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 330ms/step - accuracy: 0.0556 - loss: 2.3933 - val_accuracy: 0.1628 - val_loss: 2.4839 Epoch 100/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 320ms/step - accuracy: 0.0449 - loss: 2.4582 - val_accuracy: 0.0233 - val_loss: 2.4840 WARNING:tensorflow:5 out of the last 5 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x000001F7ACF89700> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details. 1/2 ━━━━━━━━━━━━━━━━━━━━ 0s 295ms/stepWARNING:tensorflow:6 out of the last 6 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x000001F7ACF89700> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details. 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 240ms/step Test Accuracy: 2.33% Average Test Precision: 0.002 Average Test Recall: 0.100 Average Test F1-score: 0.005
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
In [22]:
plt.plot(history_conv_rnn.history['loss'])
plt.plot(history_conv_rnn.history['val_loss'])
plt.title('Loss vs Epoch for a RNN with a 1D Convolutional Filter')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['loss', 'val_loss'], loc='upper right')
plt.show()
plt.plot(history_conv_rnn.history['accuracy'])
plt.plot(history_conv_rnn.history['val_accuracy'])
plt.title('Accuracy vs Epoch for a RNN with a 1D Convolutional Filter')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['accuracy', 'val_accuracy'], loc='lower right')
plt.show()
In [32]:
# try a simple lstm for comparison
dropout_rate = 0.3
model_single_lstm = Sequential([
LSTM(128, input_shape=(517, 128)),
Dropout(dropout_rate),
Dense(512, activation="relu"),
Dense(12, activation="softmax") # Final output (12 classes)
])
# Compile model
model_single_lstm.compile(
optimizer="rmsprop",
loss="categorical_crossentropy",
metrics=["accuracy"]
)
print(model_single_lstm.summary())
Model: "sequential_8"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ │ lstm_10 (LSTM) │ (None, 128) │ 131,584 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dropout_10 (Dropout) │ (None, 128) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_16 (Dense) │ (None, 512) │ 66,048 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_17 (Dense) │ (None, 12) │ 6,156 │ └─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 203,788 (796.05 KB)
Trainable params: 203,788 (796.05 KB)
Non-trainable params: 0 (0.00 B)
None
In [ ]:
history_single_lstm = model_single_lstm.fit(train_data, train_labels, validation_data=(test_data, test_labels),
batch_size=batch_size,
epochs=100,
class_weight=class_weight_dict)
#callbacks=[early_stopping])
test_pred = model_single_lstm.predict(test_data)
Epoch 1/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 149ms/step - accuracy: 0.1254 - loss: 2.3575 - val_accuracy: 0.0698 - val_loss: 2.5052 Epoch 2/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 141ms/step - accuracy: 0.1368 - loss: 2.2519 - val_accuracy: 0.0930 - val_loss: 2.5702 Epoch 3/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 147ms/step - accuracy: 0.1629 - loss: 2.3061 - val_accuracy: 0.1163 - val_loss: 2.5655 Epoch 4/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 144ms/step - accuracy: 0.1656 - loss: 2.2513 - val_accuracy: 0.0465 - val_loss: 2.5019 Epoch 5/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 153ms/step - accuracy: 0.1803 - loss: 2.2650 - val_accuracy: 0.1163 - val_loss: 2.5860 Epoch 6/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 160ms/step - accuracy: 0.1749 - loss: 2.3012 - val_accuracy: 0.0698 - val_loss: 2.4755 Epoch 7/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 168ms/step - accuracy: 0.1350 - loss: 2.2926 - val_accuracy: 0.0000e+00 - val_loss: 2.8231 Epoch 8/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 161ms/step - accuracy: 0.1153 - loss: 2.3271 - val_accuracy: 0.0698 - val_loss: 2.5841 Epoch 9/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 160ms/step - accuracy: 0.1780 - loss: 2.3870 - val_accuracy: 0.1628 - val_loss: 2.3999 Epoch 10/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 163ms/step - accuracy: 0.1244 - loss: 2.3052 - val_accuracy: 0.0233 - val_loss: 2.5279 Epoch 11/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 179ms/step - accuracy: 0.1384 - loss: 2.3372 - val_accuracy: 0.1163 - val_loss: 2.4971 Epoch 12/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 182ms/step - accuracy: 0.1206 - loss: 2.3505 - val_accuracy: 0.0000e+00 - val_loss: 2.8618 Epoch 13/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 174ms/step - accuracy: 0.1253 - loss: 2.5892 - val_accuracy: 0.0698 - val_loss: 2.3811 Epoch 14/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 190ms/step - accuracy: 0.1695 - loss: 2.3778 - val_accuracy: 0.0698 - val_loss: 2.4855 Epoch 15/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 172ms/step - accuracy: 0.1977 - loss: 2.2970 - val_accuracy: 0.0930 - val_loss: 2.4968 Epoch 16/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 186ms/step - accuracy: 0.1640 - loss: 2.2967 - val_accuracy: 0.0698 - val_loss: 2.3756 Epoch 17/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 206ms/step - accuracy: 0.1494 - loss: 2.4875 - val_accuracy: 0.0465 - val_loss: 2.4643 Epoch 18/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 206ms/step - accuracy: 0.2197 - loss: 2.1490 - val_accuracy: 0.0698 - val_loss: 2.5426 Epoch 19/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 207ms/step - accuracy: 0.1531 - loss: 2.3137 - val_accuracy: 0.1395 - val_loss: 2.5238 Epoch 20/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 221ms/step - accuracy: 0.1698 - loss: 2.3404 - val_accuracy: 0.1395 - val_loss: 2.4683 Epoch 21/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 236ms/step - accuracy: 0.1719 - loss: 2.2313 - val_accuracy: 0.0930 - val_loss: 2.5524 Epoch 22/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 210ms/step - accuracy: 0.1908 - loss: 2.3460 - val_accuracy: 0.0698 - val_loss: 2.6233 Epoch 23/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 207ms/step - accuracy: 0.1706 - loss: 2.2765 - val_accuracy: 0.1163 - val_loss: 2.5644 Epoch 24/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 204ms/step - accuracy: 0.1794 - loss: 2.2255 - val_accuracy: 0.0698 - val_loss: 2.5751 Epoch 25/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 194ms/step - accuracy: 0.1469 - loss: 2.3286 - val_accuracy: 0.1163 - val_loss: 2.5341 Epoch 26/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 193ms/step - accuracy: 0.2177 - loss: 2.1128 - val_accuracy: 0.0930 - val_loss: 2.6289 Epoch 27/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 200ms/step - accuracy: 0.1946 - loss: 2.3390 - val_accuracy: 0.0930 - val_loss: 2.5545 Epoch 28/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 195ms/step - accuracy: 0.1626 - loss: 2.4107 - val_accuracy: 0.0698 - val_loss: 2.5059 Epoch 29/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 187ms/step - accuracy: 0.1648 - loss: 2.3199 - val_accuracy: 0.0465 - val_loss: 2.7014 Epoch 30/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 187ms/step - accuracy: 0.1749 - loss: 2.3679 - val_accuracy: 0.0930 - val_loss: 2.5068 Epoch 31/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 204ms/step - accuracy: 0.1943 - loss: 2.2722 - val_accuracy: 0.1395 - val_loss: 2.4427 Epoch 32/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 186ms/step - accuracy: 0.1750 - loss: 2.1349 - val_accuracy: 0.0465 - val_loss: 2.6105 Epoch 33/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 207ms/step - accuracy: 0.1162 - loss: 2.2982 - val_accuracy: 0.1628 - val_loss: 2.5422 Epoch 34/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 206ms/step - accuracy: 0.1475 - loss: 2.3431 - val_accuracy: 0.1163 - val_loss: 2.5797 Epoch 35/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 194ms/step - accuracy: 0.1268 - loss: 2.3796 - val_accuracy: 0.0465 - val_loss: 2.7592 Epoch 36/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 186ms/step - accuracy: 0.1251 - loss: 2.2949 - val_accuracy: 0.1163 - val_loss: 2.6145 Epoch 37/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 190ms/step - accuracy: 0.1366 - loss: 2.2913 - val_accuracy: 0.0930 - val_loss: 2.4852 Epoch 38/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 189ms/step - accuracy: 0.2076 - loss: 2.2829 - val_accuracy: 0.1163 - val_loss: 2.6045 Epoch 39/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 193ms/step - accuracy: 0.1098 - loss: 2.4463 - val_accuracy: 0.2093 - val_loss: 2.4610 Epoch 40/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 190ms/step - accuracy: 0.1575 - loss: 2.3242 - val_accuracy: 0.0930 - val_loss: 2.4976 Epoch 41/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 188ms/step - accuracy: 0.1361 - loss: 2.3667 - val_accuracy: 0.0698 - val_loss: 2.6269 Epoch 42/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 185ms/step - accuracy: 0.1900 - loss: 2.2320 - val_accuracy: 0.2326 - val_loss: 2.3870 Epoch 43/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 184ms/step - accuracy: 0.2540 - loss: 2.1875 - val_accuracy: 0.0465 - val_loss: 2.6669 Epoch 44/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 186ms/step - accuracy: 0.1721 - loss: 2.3262 - val_accuracy: 0.1163 - val_loss: 2.5053 Epoch 45/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 182ms/step - accuracy: 0.1681 - loss: 2.1739 - val_accuracy: 0.0698 - val_loss: 2.6060 Epoch 46/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 189ms/step - accuracy: 0.2028 - loss: 2.1294 - val_accuracy: 0.0930 - val_loss: 2.5381 Epoch 47/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 187ms/step - accuracy: 0.1554 - loss: 2.1857 - val_accuracy: 0.0930 - val_loss: 2.6240 Epoch 48/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 184ms/step - accuracy: 0.1573 - loss: 2.2997 - val_accuracy: 0.0930 - val_loss: 2.6370 Epoch 49/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 186ms/step - accuracy: 0.1954 - loss: 2.3461 - val_accuracy: 0.1395 - val_loss: 2.4487 Epoch 50/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 184ms/step - accuracy: 0.1827 - loss: 2.1847 - val_accuracy: 0.1395 - val_loss: 2.4500 Epoch 51/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 180ms/step - accuracy: 0.1905 - loss: 2.2767 - val_accuracy: 0.1860 - val_loss: 2.3807 Epoch 52/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 203ms/step - accuracy: 0.2149 - loss: 2.1738 - val_accuracy: 0.1163 - val_loss: 2.5749 Epoch 53/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 194ms/step - accuracy: 0.2714 - loss: 2.1968 - val_accuracy: 0.1860 - val_loss: 2.5120 Epoch 54/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 190ms/step - accuracy: 0.1701 - loss: 2.1033 - val_accuracy: 0.1860 - val_loss: 2.4154 Epoch 55/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 190ms/step - accuracy: 0.1999 - loss: 2.2187 - val_accuracy: 0.0698 - val_loss: 2.4995 Epoch 56/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 186ms/step - accuracy: 0.2356 - loss: 2.0950 - val_accuracy: 0.1628 - val_loss: 2.5946 Epoch 57/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 181ms/step - accuracy: 0.1536 - loss: 2.2159 - val_accuracy: 0.0930 - val_loss: 2.5933 Epoch 58/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 182ms/step - accuracy: 0.2001 - loss: 2.2491 - val_accuracy: 0.1395 - val_loss: 2.4942 Epoch 59/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 183ms/step - accuracy: 0.2584 - loss: 2.0046 - val_accuracy: 0.1163 - val_loss: 2.4325 Epoch 60/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 184ms/step - accuracy: 0.1637 - loss: 2.3336 - val_accuracy: 0.0698 - val_loss: 2.5579 Epoch 61/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 179ms/step - accuracy: 0.1685 - loss: 2.2157 - val_accuracy: 0.0233 - val_loss: 2.6240 Epoch 62/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 183ms/step - accuracy: 0.1360 - loss: 2.1851 - val_accuracy: 0.0465 - val_loss: 2.5460 Epoch 63/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 187ms/step - accuracy: 0.2055 - loss: 2.1370 - val_accuracy: 0.1628 - val_loss: 2.4307 Epoch 64/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 187ms/step - accuracy: 0.1972 - loss: 2.2338 - val_accuracy: 0.1395 - val_loss: 2.3995 Epoch 65/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 186ms/step - accuracy: 0.2203 - loss: 2.1531 - val_accuracy: 0.1860 - val_loss: 2.4662 Epoch 66/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 180ms/step - accuracy: 0.1907 - loss: 2.1064 - val_accuracy: 0.1163 - val_loss: 2.4718 Epoch 67/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 179ms/step - accuracy: 0.1831 - loss: 2.2007 - val_accuracy: 0.1163 - val_loss: 2.4352 Epoch 68/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 187ms/step - accuracy: 0.1988 - loss: 2.3201 - val_accuracy: 0.1395 - val_loss: 2.4707 Epoch 69/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 183ms/step - accuracy: 0.1924 - loss: 2.2650 - val_accuracy: 0.0698 - val_loss: 2.5694 Epoch 70/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 180ms/step - accuracy: 0.1750 - loss: 2.3167 - val_accuracy: 0.1628 - val_loss: 2.5482 Epoch 71/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 187ms/step - accuracy: 0.1297 - loss: 2.1850 - val_accuracy: 0.1860 - val_loss: 2.5758 Epoch 72/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 190ms/step - accuracy: 0.1848 - loss: 2.1860 - val_accuracy: 0.0698 - val_loss: 2.6492 Epoch 73/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 184ms/step - accuracy: 0.2012 - loss: 2.0454 - val_accuracy: 0.0930 - val_loss: 2.5674 Epoch 74/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 184ms/step - accuracy: 0.1875 - loss: 2.1741 - val_accuracy: 0.1163 - val_loss: 2.5667 Epoch 75/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 188ms/step - accuracy: 0.1352 - loss: 2.2435 - val_accuracy: 0.0930 - val_loss: 2.5373 Epoch 76/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 187ms/step - accuracy: 0.1414 - loss: 2.3623 - val_accuracy: 0.1628 - val_loss: 2.4913 Epoch 77/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 182ms/step - accuracy: 0.1559 - loss: 2.3439 - val_accuracy: 0.0930 - val_loss: 2.5697 Epoch 78/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 190ms/step - accuracy: 0.1336 - loss: 2.1622 - val_accuracy: 0.1163 - val_loss: 2.4733 Epoch 79/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 183ms/step - accuracy: 0.2084 - loss: 2.1772 - val_accuracy: 0.0930 - val_loss: 2.4785 Epoch 80/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 179ms/step - accuracy: 0.1898 - loss: 2.1881 - val_accuracy: 0.1163 - val_loss: 2.4268 Epoch 81/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 173ms/step - accuracy: 0.1873 - loss: 2.1834 - val_accuracy: 0.0465 - val_loss: 2.4735 Epoch 82/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 185ms/step - accuracy: 0.1855 - loss: 2.1128 - val_accuracy: 0.0465 - val_loss: 2.7505 Epoch 83/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 180ms/step - accuracy: 0.1846 - loss: 2.2163 - val_accuracy: 0.1395 - val_loss: 2.4830 Epoch 84/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 187ms/step - accuracy: 0.1655 - loss: 2.2522 - val_accuracy: 0.1163 - val_loss: 2.5875 Epoch 85/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 183ms/step - accuracy: 0.2388 - loss: 2.2316 - val_accuracy: 0.0930 - val_loss: 2.5626 Epoch 86/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 188ms/step - accuracy: 0.1579 - loss: 2.3265 - val_accuracy: 0.0465 - val_loss: 2.6043 Epoch 87/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 187ms/step - accuracy: 0.1562 - loss: 2.4067 - val_accuracy: 0.1395 - val_loss: 2.5352 Epoch 88/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 184ms/step - accuracy: 0.2755 - loss: 2.0800 - val_accuracy: 0.1163 - val_loss: 2.4791 Epoch 89/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 184ms/step - accuracy: 0.1837 - loss: 2.1865 - val_accuracy: 0.1395 - val_loss: 2.4318 Epoch 90/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 179ms/step - accuracy: 0.2355 - loss: 2.0790 - val_accuracy: 0.0233 - val_loss: 2.6204 Epoch 91/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 173ms/step - accuracy: 0.1875 - loss: 2.2027 - val_accuracy: 0.0930 - val_loss: 2.5403 Epoch 92/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 182ms/step - accuracy: 0.1694 - loss: 2.2302 - val_accuracy: 0.0930 - val_loss: 2.4513 Epoch 93/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 187ms/step - accuracy: 0.2194 - loss: 2.1155 - val_accuracy: 0.0233 - val_loss: 2.5806 Epoch 94/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 187ms/step - accuracy: 0.2154 - loss: 2.1426 - val_accuracy: 0.0465 - val_loss: 2.4105 Epoch 95/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 182ms/step - accuracy: 0.2385 - loss: 2.1107 - val_accuracy: 0.1163 - val_loss: 2.5839 Epoch 96/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 180ms/step - accuracy: 0.2393 - loss: 2.0626 - val_accuracy: 0.1163 - val_loss: 2.6671 Epoch 97/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 183ms/step - accuracy: 0.1870 - loss: 2.1392 - val_accuracy: 0.1395 - val_loss: 2.5549 Epoch 98/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 180ms/step - accuracy: 0.2176 - loss: 2.1651 - val_accuracy: 0.1395 - val_loss: 2.6359 Epoch 99/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 186ms/step - accuracy: 0.2302 - loss: 2.1999 - val_accuracy: 0.1860 - val_loss: 2.5613 Epoch 100/100 6/6 ━━━━━━━━━━━━━━━━━━━━ 1s 184ms/step - accuracy: 0.2243 - loss: 2.0661 - val_accuracy: 0.1163 - val_loss: 2.5612 2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 39ms/step
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) Cell In[34], line 12 9 y_true_classes = np.where(np.any(test_labels, axis=1)) 10 y_pred_classes = np.argmax(test_pred, axis=1) ---> 12 test_accuracy = accuracy_score(y_true_classes, y_pred_classes) 13 precision, recall, f1, _ = precision_recall_fscore_support(y_true_classes, y_pred_classes, average='macro') 15 print(f"Test Accuracy: {test_accuracy*100:.2f}%") File c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\utils\_param_validation.py:216, in validate_params.<locals>.decorator.<locals>.wrapper(*args, **kwargs) 210 try: 211 with config_context( 212 skip_parameter_validation=( 213 prefer_skip_nested_validation or global_skip_validation 214 ) 215 ): --> 216 return func(*args, **kwargs) 217 except InvalidParameterError as e: 218 # When the function is just a wrapper around an estimator, we allow 219 # the function to delegate validation to the estimator, but we replace 220 # the name of the estimator by the name of the function in the error 221 # message to avoid confusion. 222 msg = re.sub( 223 r"parameter of \w+ must be", 224 f"parameter of {func.__qualname__} must be", 225 str(e), 226 ) File c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:227, in accuracy_score(y_true, y_pred, normalize, sample_weight) 225 # Compute accuracy for each possible representation 226 y_true, y_pred = attach_unique(y_true, y_pred) --> 227 y_type, y_true, y_pred = _check_targets(y_true, y_pred) 228 check_consistent_length(y_true, y_pred, sample_weight) 230 if y_type.startswith("multilabel"): File c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:98, in _check_targets(y_true, y_pred) 71 """Check that y_true and y_pred belong to the same classification task. 72 73 This converts multiclass or binary types to a common shape, and raises a (...) 95 y_pred : array or indicator matrix 96 """ 97 xp, _ = get_namespace(y_true, y_pred) ---> 98 check_consistent_length(y_true, y_pred) 99 type_true = type_of_target(y_true, input_name="y_true") 100 type_pred = type_of_target(y_pred, input_name="y_pred") File c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\utils\validation.py:475, in check_consistent_length(*arrays) 473 uniques = np.unique(lengths) 474 if len(uniques) > 1: --> 475 raise ValueError( 476 "Found input variables with inconsistent numbers of samples: %r" 477 % [int(l) for l in lengths] 478 ) ValueError: Found input variables with inconsistent numbers of samples: [1, 43]
In [36]:
y_true_classes = np.argmax(test_labels, axis=1)
y_pred_classes = np.argmax(test_pred, axis=1)
test_accuracy = accuracy_score(y_true_classes, y_pred_classes)
precision, recall, f1, _ = precision_recall_fscore_support(y_true_classes, y_pred_classes, average='macro')
print(f"Test Accuracy: {test_accuracy*100:.2f}%")
print(f"Average Test Precision: {precision:.3f}")
print(f"Average Test Recall: {recall:.3f}")
print(f"Average Test F1-score: {f1:.3f}")
Test Accuracy: 11.63% Average Test Precision: 0.046 Average Test Recall: 0.094 Average Test F1-score: 0.055
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
In [37]:
print(y_true_classes)
[10 2 6 8 6 9 1 6 7 6 8 6 1 6 3 8 8 5 5 9 8 6 6 2 8 8 10 1 1 6 6 6 9 1 2 9 2 9 4 6 7 6 4]
In [38]:
plt.plot(history_single_lstm.history['loss'])
plt.plot(history_single_lstm.history['val_loss'])
plt.title('Loss vs Epoch for a RNN with a 1D Convolutional Filter')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['loss', 'val_loss'], loc='upper right')
plt.show()
plt.plot(history_single_lstm.history['accuracy'])
plt.plot(history_single_lstm.history['val_accuracy'])
plt.title('Accuracy vs Epoch for a RNN with a 1D Convolutional Filter')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['accuracy', 'val_accuracy'], loc='lower right')
plt.show()
more exploration with the convolutional model¶
The best results so far have come from treating the spectrograms like an image and using a convolutional neural network. The results from attempting to use a recurrent network have not been promising. As a result, further exploration of the parameters for the convolutional model will be conducted. A model which uses a lstm network to
In [ ]:
train_data = np.transpose(eda_data[:,:, train_indices], (2, 0, 1)).reshape(-1, 128, 517, 1) # reshape and add single channel for cnn
train_labels = to_categorical(eda_data_labels[train_indices], 12)
test_data = np.transpose(eda_data[:,:, test_indices], (2, 0, 1)).reshape(-1, 128, 517, 1)
test_labels = to_categorical(eda_data_labels[test_indices], 12)
print(test_labels.shape)
print(train_labels.shape)
print(test_labels[0])
# create weights for training loss relative to response class imbalance
class_weights = compute_class_weight(class_weight="balanced",
classes=np.unique(eda_data_labels[train_indices], axis=0),
y=eda_data_labels[train_indices])
class_weight_dict = {i: class_weights[i] for i in range(len(class_weights))}
print(class_weight_dict)
In [61]:
conv_width = 3
conv_height = 3
pool_width = 2
dropout_rate = 0.3
model_cnn = Sequential(
[
Conv2D(filters=32, kernel_size=(conv_height, conv_width), padding='same', activation='relu', input_shape=(128,517, 1)),
MaxPooling2D(pool_size=(pool_width, pool_width)),
Conv2D(filters=64, kernel_size=(conv_height, conv_width), padding='same', activation='relu'),
MaxPooling2D(pool_size=(pool_width, pool_width)),
Conv2D(filters=128, kernel_size=(conv_height, conv_width), padding='same', activation='relu'),
MaxPooling2D(pool_size=(pool_width, pool_width)),
Conv2D(filters=256, kernel_size=(conv_height, conv_width), padding='same', activation='relu'),
MaxPooling2D(pool_size=(pool_width, pool_width)),
#Flatten(),
Permute((2, 1, 3)),
Reshape((32, 8*256)),
LSTM(128),
Dropout(rate=dropout_rate),
Dense(units=512, activation='relu'),
Dense(units=12, activation='softmax') # 12 bird species
])
early_stopping = EarlyStopping(
monitor='val_loss',
patience=15,
restore_best_weights=True
)
print(model_cnn.summary())
train_data = np.transpose(eda_data[:,:, train_indices], (2, 0, 1)).reshape(-1, 128, 517, 1) # reshape and add single channel for cnn
train_labels = to_categorical(eda_data_labels[train_indices], 12)
test_data = np.transpose(eda_data[:,:, test_indices], (2, 0, 1)).reshape(-1, 128, 517, 1)
test_labels = to_categorical(eda_data_labels[test_indices], 12)
print(test_labels.shape)
print(train_labels.shape)
print(test_labels[0])
# create weights for training loss relative to response class imbalance
class_weights = compute_class_weight(class_weight="balanced",
classes=np.unique(eda_data_labels[train_indices], axis=0),
y=eda_data_labels[train_indices])
class_weight_dict = {i: class_weights[i] for i in range(len(class_weights))}
print(class_weight_dict)
Model: "sequential_17"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ │ conv2d_36 (Conv2D) │ (None, 128, 517, 32) │ 320 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ max_pooling2d_36 (MaxPooling2D) │ (None, 64, 258, 32) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ conv2d_37 (Conv2D) │ (None, 64, 258, 64) │ 18,496 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ max_pooling2d_37 (MaxPooling2D) │ (None, 32, 129, 64) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ conv2d_38 (Conv2D) │ (None, 32, 129, 128) │ 73,856 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ max_pooling2d_38 (MaxPooling2D) │ (None, 16, 64, 128) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ conv2d_39 (Conv2D) │ (None, 16, 64, 256) │ 295,168 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ max_pooling2d_39 (MaxPooling2D) │ (None, 8, 32, 256) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ permute_3 (Permute) │ (None, 32, 8, 256) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ reshape_4 (Reshape) │ (None, 32, 2048) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ lstm_16 (LSTM) │ (None, 128) │ 1,114,624 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dropout_19 (Dropout) │ (None, 128) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_34 (Dense) │ (None, 512) │ 66,048 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_35 (Dense) │ (None, 12) │ 6,156 │ └─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 1,574,668 (6.01 MB)
Trainable params: 1,574,668 (6.01 MB)
Non-trainable params: 0 (0.00 B)
None
(43, 12)
(170, 12)
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]
{0: np.float64(1.4166666666666667), 1: np.float64(1.1805555555555556), 2: np.float64(1.4166666666666667), 3: np.float64(1.5740740740740742), 4: np.float64(1.4166666666666667), 5: np.float64(1.7708333333333333), 6: np.float64(0.2833333333333333), 7: np.float64(1.7708333333333333), 8: np.float64(1.2878787878787878), 9: np.float64(0.6746031746031746), 10: np.float64(1.2878787878787878), 11: np.float64(1.4166666666666667)}
In [62]:
%%time
batch_size = 30 # works of with ~170 train examples
model_cnn.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
history_cnn = model_cnn.fit(train_data, train_labels,
epochs=75,
batch_size=batch_size,
validation_data=(test_data, test_labels),
verbose=1,
class_weight=class_weight_dict)
#callbacks=[early_stopping])
test_pred = model_cnn.predict(test_data)
y_true_classes = np.argmax(test_labels, axis=1)
y_pred_classes = np.argmax(test_pred, axis=1)
test_accuracy = accuracy_score(y_true_classes, y_pred_classes)
precision, recall, f1, _ = precision_recall_fscore_support(y_true_classes, y_pred_classes, average='macro')
print(f"Test Accuracy: {test_accuracy*100:.2f}%")
print(f"Average Test Precision: {precision:.3f}")
print(f"Average Test Recall: {recall:.3f}")
print(f"Average Test F1-score: {f1:.3f}")
Epoch 1/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 556ms/step - accuracy: 0.0389 - loss: 2.6377 - val_accuracy: 0.0465 - val_loss: 2.4849 Epoch 2/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 463ms/step - accuracy: 0.0552 - loss: 2.6948 - val_accuracy: 0.1163 - val_loss: 2.4616 Epoch 3/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 462ms/step - accuracy: 0.0859 - loss: 2.5383 - val_accuracy: 0.1163 - val_loss: 2.4942 Epoch 4/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 454ms/step - accuracy: 0.0429 - loss: 2.5627 - val_accuracy: 0.0465 - val_loss: 2.4644 Epoch 5/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 461ms/step - accuracy: 0.0585 - loss: 2.5520 - val_accuracy: 0.0465 - val_loss: 2.5004 Epoch 6/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 474ms/step - accuracy: 0.0986 - loss: 2.3706 - val_accuracy: 0.0930 - val_loss: 2.5060 Epoch 7/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 482ms/step - accuracy: 0.0517 - loss: 2.5738 - val_accuracy: 0.1163 - val_loss: 2.5161 Epoch 8/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 477ms/step - accuracy: 0.0568 - loss: 2.4909 - val_accuracy: 0.1163 - val_loss: 2.5054 Epoch 9/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 482ms/step - accuracy: 0.0571 - loss: 2.5955 - val_accuracy: 0.1163 - val_loss: 2.4865 Epoch 10/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 468ms/step - accuracy: 0.0507 - loss: 2.5313 - val_accuracy: 0.0000e+00 - val_loss: 2.5086 Epoch 11/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 468ms/step - accuracy: 0.0729 - loss: 2.6084 - val_accuracy: 0.1163 - val_loss: 2.4932 Epoch 12/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 481ms/step - accuracy: 0.0699 - loss: 2.6851 - val_accuracy: 0.1163 - val_loss: 2.4554 Epoch 13/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 511ms/step - accuracy: 0.0403 - loss: 2.6646 - val_accuracy: 0.0465 - val_loss: 2.4857 Epoch 14/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 3s 491ms/step - accuracy: 0.1003 - loss: 2.4177 - val_accuracy: 0.0465 - val_loss: 2.5082 Epoch 15/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 6s 648ms/step - accuracy: 0.0379 - loss: 2.5025 - val_accuracy: 0.1163 - val_loss: 2.4895 Epoch 16/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 798ms/step - accuracy: 0.0289 - loss: 2.4738 - val_accuracy: 0.0465 - val_loss: 2.5196 Epoch 17/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 769ms/step - accuracy: 0.0544 - loss: 2.4945 - val_accuracy: 0.1628 - val_loss: 2.4790 Epoch 18/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 708ms/step - accuracy: 0.0584 - loss: 2.5124 - val_accuracy: 0.0465 - val_loss: 2.4917 Epoch 19/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 725ms/step - accuracy: 0.1513 - loss: 2.4328 - val_accuracy: 0.1163 - val_loss: 2.5032 Epoch 20/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 630ms/step - accuracy: 0.0235 - loss: 2.4581 - val_accuracy: 0.0000e+00 - val_loss: 2.5351 Epoch 21/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 703ms/step - accuracy: 0.0847 - loss: 2.5003 - val_accuracy: 0.0000e+00 - val_loss: 2.4947 Epoch 22/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 714ms/step - accuracy: 0.0557 - loss: 2.5136 - val_accuracy: 0.1163 - val_loss: 2.5352 Epoch 23/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 712ms/step - accuracy: 0.0366 - loss: 2.4660 - val_accuracy: 0.0465 - val_loss: 2.5453 Epoch 24/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 658ms/step - accuracy: 0.0580 - loss: 2.4786 - val_accuracy: 0.0465 - val_loss: 2.5025 Epoch 25/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 668ms/step - accuracy: 0.0783 - loss: 2.4931 - val_accuracy: 0.0465 - val_loss: 2.5235 Epoch 26/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 662ms/step - accuracy: 0.0773 - loss: 2.5466 - val_accuracy: 0.0465 - val_loss: 2.4929 Epoch 27/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 706ms/step - accuracy: 0.0739 - loss: 2.4964 - val_accuracy: 0.0465 - val_loss: 2.4924 Epoch 28/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 646ms/step - accuracy: 0.0329 - loss: 2.5546 - val_accuracy: 0.1628 - val_loss: 2.4739 Epoch 29/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 689ms/step - accuracy: 0.0602 - loss: 2.4683 - val_accuracy: 0.1163 - val_loss: 2.4936 Epoch 30/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 673ms/step - accuracy: 0.0658 - loss: 2.5326 - val_accuracy: 0.0930 - val_loss: 2.4826 Epoch 31/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 646ms/step - accuracy: 0.0751 - loss: 2.3574 - val_accuracy: 0.0465 - val_loss: 2.5441 Epoch 32/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 652ms/step - accuracy: 0.0438 - loss: 2.5356 - val_accuracy: 0.0465 - val_loss: 2.4978 Epoch 33/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 676ms/step - accuracy: 0.0582 - loss: 2.5617 - val_accuracy: 0.0465 - val_loss: 2.4968 Epoch 34/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 680ms/step - accuracy: 0.0764 - loss: 2.6069 - val_accuracy: 0.0465 - val_loss: 2.4991 Epoch 35/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 660ms/step - accuracy: 0.0701 - loss: 2.4827 - val_accuracy: 0.0000e+00 - val_loss: 2.5250 Epoch 36/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 707ms/step - accuracy: 0.0302 - loss: 2.5636 - val_accuracy: 0.0000e+00 - val_loss: 2.4871 Epoch 37/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 646ms/step - accuracy: 0.1098 - loss: 2.4289 - val_accuracy: 0.0000e+00 - val_loss: 2.5190 Epoch 38/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 619ms/step - accuracy: 0.0929 - loss: 2.4824 - val_accuracy: 0.0465 - val_loss: 2.5125 Epoch 39/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 685ms/step - accuracy: 0.0659 - loss: 2.5120 - val_accuracy: 0.0465 - val_loss: 2.5046 Epoch 40/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 710ms/step - accuracy: 0.0628 - loss: 2.4805 - val_accuracy: 0.1628 - val_loss: 2.5108 Epoch 41/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 677ms/step - accuracy: 0.0656 - loss: 2.5464 - val_accuracy: 0.0465 - val_loss: 2.5363 Epoch 42/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 661ms/step - accuracy: 0.0460 - loss: 2.4274 - val_accuracy: 0.0000e+00 - val_loss: 2.5139 Epoch 43/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 5s 622ms/step - accuracy: 0.0476 - loss: 2.5582 - val_accuracy: 0.0930 - val_loss: 2.4955 Epoch 44/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 668ms/step - accuracy: 0.0875 - loss: 2.5376 - val_accuracy: 0.1163 - val_loss: 2.5024 Epoch 45/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 699ms/step - accuracy: 0.0267 - loss: 2.6676 - val_accuracy: 0.0465 - val_loss: 2.4927 Epoch 46/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 700ms/step - accuracy: 0.0550 - loss: 2.6143 - val_accuracy: 0.0465 - val_loss: 2.4905 Epoch 47/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 698ms/step - accuracy: 0.0689 - loss: 2.4236 - val_accuracy: 0.1628 - val_loss: 2.4902 Epoch 48/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 705ms/step - accuracy: 0.0872 - loss: 2.4614 - val_accuracy: 0.1628 - val_loss: 2.5005 Epoch 49/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 670ms/step - accuracy: 0.0783 - loss: 2.5246 - val_accuracy: 0.1163 - val_loss: 2.5167 Epoch 50/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 730ms/step - accuracy: 0.0510 - loss: 2.4449 - val_accuracy: 0.0465 - val_loss: 2.5365 Epoch 51/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 690ms/step - accuracy: 0.0959 - loss: 2.4928 - val_accuracy: 0.1628 - val_loss: 2.5041 Epoch 52/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 694ms/step - accuracy: 0.0546 - loss: 2.5461 - val_accuracy: 0.0000e+00 - val_loss: 2.5021 Epoch 53/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 698ms/step - accuracy: 0.0768 - loss: 2.5382 - val_accuracy: 0.0000e+00 - val_loss: 2.5071 Epoch 54/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 693ms/step - accuracy: 0.0569 - loss: 2.5128 - val_accuracy: 0.0000e+00 - val_loss: 2.5150 Epoch 55/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 678ms/step - accuracy: 0.0920 - loss: 2.5302 - val_accuracy: 0.0465 - val_loss: 2.5147 Epoch 56/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 731ms/step - accuracy: 0.0407 - loss: 2.4935 - val_accuracy: 0.0000e+00 - val_loss: 2.5186 Epoch 57/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 665ms/step - accuracy: 0.0360 - loss: 2.4660 - val_accuracy: 0.1163 - val_loss: 2.5268 Epoch 58/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 647ms/step - accuracy: 0.0524 - loss: 2.5193 - val_accuracy: 0.1163 - val_loss: 2.5181 Epoch 59/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 650ms/step - accuracy: 0.0501 - loss: 2.5411 - val_accuracy: 0.0465 - val_loss: 2.5271 Epoch 60/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 635ms/step - accuracy: 0.0628 - loss: 2.6719 - val_accuracy: 0.1163 - val_loss: 2.4971 Epoch 61/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 640ms/step - accuracy: 0.0651 - loss: 2.5680 - val_accuracy: 0.1163 - val_loss: 2.4807 Epoch 62/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 643ms/step - accuracy: 0.0800 - loss: 2.5534 - val_accuracy: 0.1628 - val_loss: 2.4892 Epoch 63/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 635ms/step - accuracy: 0.0781 - loss: 2.5905 - val_accuracy: 0.0000e+00 - val_loss: 2.5032 Epoch 64/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 657ms/step - accuracy: 0.0768 - loss: 2.5511 - val_accuracy: 0.0000e+00 - val_loss: 2.5073 Epoch 65/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 658ms/step - accuracy: 0.0682 - loss: 2.6059 - val_accuracy: 0.1628 - val_loss: 2.4883 Epoch 66/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 643ms/step - accuracy: 0.0589 - loss: 2.5339 - val_accuracy: 0.1628 - val_loss: 2.4970 Epoch 67/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 719ms/step - accuracy: 0.0675 - loss: 2.5363 - val_accuracy: 0.1628 - val_loss: 2.5026 Epoch 68/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 651ms/step - accuracy: 0.0898 - loss: 2.5283 - val_accuracy: 0.1628 - val_loss: 2.4965 Epoch 69/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 648ms/step - accuracy: 0.1188 - loss: 2.4468 - val_accuracy: 0.1628 - val_loss: 2.5110 Epoch 70/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 659ms/step - accuracy: 0.0685 - loss: 2.5492 - val_accuracy: 0.0000e+00 - val_loss: 2.5144 Epoch 71/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 675ms/step - accuracy: 0.0721 - loss: 2.4613 - val_accuracy: 0.0465 - val_loss: 2.5160 Epoch 72/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 676ms/step - accuracy: 0.0318 - loss: 2.4606 - val_accuracy: 0.0000e+00 - val_loss: 2.5289 Epoch 73/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 679ms/step - accuracy: 0.0328 - loss: 2.5262 - val_accuracy: 0.0000e+00 - val_loss: 2.5117 Epoch 74/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 677ms/step - accuracy: 0.0944 - loss: 2.5563 - val_accuracy: 0.0000e+00 - val_loss: 2.5210 Epoch 75/75 6/6 ━━━━━━━━━━━━━━━━━━━━ 4s 718ms/step - accuracy: 0.0838 - loss: 2.5004 - val_accuracy: 0.0000e+00 - val_loss: 2.5256 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 306ms/step Test Accuracy: 0.00% Average Test Precision: 0.000 Average Test Recall: 0.000 Average Test F1-score: 0.000 CPU times: total: 57min 32s Wall time: 5min 7s
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
In [64]:
plt.plot(history_cnn.history['loss'])
plt.plot(history_cnn.history['val_loss'])
plt.title('Loss vs Epoch for a RNN with a 2D Convolutional Filter')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['loss', 'val_loss'], loc='upper right')
plt.show()
plt.plot(history_cnn.history['accuracy'])
plt.plot(history_cnn.history['val_accuracy'])
plt.title('Accuracy vs Epoch for a RNN with a 2D Convolutional Filter')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['accuracy', 'val_accuracy'], loc='lower right')
plt.show()
In [89]:
# tuning the 2d convolutional model further, adding an LSTM layer did not work well
conv_shapes = np.array([3, 5, 7])
dropout_rates = np.array([0.1, 0.3, 0.5])
starting_filters = np.array([16, 32])
tune_grid = np.array(np.meshgrid(conv_shapes, dropout_rates, starting_filters)).T.reshape(-1, 3)
print(tune_grid.shape) # Display first 5 combinations
test_performances = []
for tune_index in range(tune_grid.shape[0]):
conv_width = int(round(tune_grid[tune_index, 0]))
dropout_rate = tune_grid[tune_index, 1]
starting_filters = int(round(tune_grid[tune_index, 2]))
print(f"Training model {tune_index + 1}...")
this_model = Sequential(
[
Conv2D(filters=starting_filters, kernel_size=(conv_width, conv_width), padding='same', activation='relu', input_shape=(128,517, 1)),
MaxPooling2D(pool_size=(2,2)),
Conv2D(filters=starting_filters*2, kernel_size=(conv_width, conv_width), padding='same', activation='relu'),
MaxPooling2D(pool_size=(2,2)),
Conv2D(filters=starting_filters*3, kernel_size=(conv_width, conv_width), padding='same', activation='relu'),
MaxPooling2D(pool_size=(2,2)),
Conv2D(filters=starting_filters*4, kernel_size=(conv_width, conv_width), padding='same', activation='relu'),
MaxPooling2D(pool_size=(2,2)),
Flatten(),
Dropout(rate=dropout_rate),
Dense(units=512, activation='relu'),
Dense(units=12, activation='softmax') # 12 bird species
])
this_model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
original_weights = this_model.get_weights()
this_history = this_model.fit(train_data, train_labels,
epochs=75,
batch_size=20, #115 training examples
validation_data=(test_data, test_labels),
verbose=0,
class_weight=class_weight_dict)
# print and save the loss and accuracy plots, and pick the best epoch
out_dir = os.path.join(os.getcwd(), 'output', 'eda', 'plots')
plt.plot(this_history.history['loss'])
plt.plot(this_history.history['val_loss'])
plt.title('Loss vs Epoch for a RNN with a 2D Convolutional Filter')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['loss', 'val_loss'], loc='upper right')
plt.savefig(os.path.join(out_dir, f'multiclass_cnn_tune_loss_{tune_index + 1:02d}.png'))
plt.show()
plt.plot(this_history.history['accuracy'])
plt.plot(this_history.history['val_accuracy'])
plt.title('Accuracy vs Epoch for a CNN (Tuning)')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['accuracy', 'val_accuracy'], loc='lower right')
plt.savefig(os.path.join(out_dir, f'multiclass_cnn_tune_accuracy_{tune_index:02d}.png'))
plt.show()
val_acc = this_history.history['val_accuracy'] # Extract validation accuracy values
# Get the index of the highest accuracy (epochs start at 1)
# however, this is not always effective, and based on some exploration and looking at the plots,
# about 20 epochs does not appear to cause significant overfitting with this data and structure with the maximal parameter configuration.
# Requiring this minimum avoids cases were a random start or initial fluctuations lead to a very low best epoch
# that does not reproduce similar results
best_epoch = np.argmax(val_acc[19:]) + 1 + 19
print(f'Best epoch: {best_epoch}, Validation Accuracy: {val_acc[best_epoch-1]:.4f}')
# retrain and stop at best epoch from before
this_model.set_weights(original_weights)
this_history = this_model.fit(train_data, train_labels,
epochs=best_epoch,
batch_size=20, #115 training examples
validation_data=(test_data, test_labels),
verbose=0,
class_weight=class_weight_dict)
this_pred = this_model.predict(test_data)
y_true_classes = np.argmax(test_labels, axis=1)
this_pred_classes = np.argmax(this_pred, axis=1)
test_accuracy = accuracy_score(y_true_classes, this_pred_classes)
precision, recall, f1, _ = precision_recall_fscore_support(y_true_classes, this_pred_classes, average='macro')
print(f"Test Accuracy: {test_accuracy*100:.2f}%")
print(f"Average Test Precision: {precision:.3f}")
print(f"Average Test Recall: {recall:.3f}")
print(f"Average Test F1-score: {f1:.3f}")
test_performances.append({'epoch':best_epoch, 'accuracy':test_accuracy, 'average_precision':precision,
'average_recall':recall, 'average_f1':f1})
# save performances and parameters
params_df = pd.DataFrame(tune_grid, columns=['convolution_width', 'dropout_rate', 'starting_filter_cnt'])
results_df = pd.DataFrame(test_performances)
results_df = pd.concat([params_df, results_df], axis=1)
out_dir = os.path.join(os.getcwd(), 'output', 'eda', 'data')
results_df.to_csv(os.path.join(out_dir, 'tune_results_cnn_multiclass.csv'), index=False)
print(results_df)
(18, 3) Training model 1...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 36, Validation Accuracy: 0.2759 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 234ms/step Test Accuracy: 20.69% Average Test Precision: 0.034 Average Test Recall: 0.055 Average Test F1-score: 0.042 Training model 2...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 25, Validation Accuracy: 0.2414 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 209ms/step Test Accuracy: 17.24% Average Test Precision: 0.038 Average Test Recall: 0.042 Average Test F1-score: 0.040 Training model 3...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 40, Validation Accuracy: 0.2414 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 224ms/step Test Accuracy: 17.24% Average Test Precision: 0.038 Average Test Recall: 0.042 Average Test F1-score: 0.040 Training model 4...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 20, Validation Accuracy: 0.2069 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 303ms/step Test Accuracy: 17.24% Average Test Precision: 0.038 Average Test Recall: 0.042 Average Test F1-score: 0.040 Training model 5...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 30, Validation Accuracy: 0.2414 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 313ms/step Test Accuracy: 20.69% Average Test Precision: 0.028 Average Test Recall: 0.050 Average Test F1-score: 0.036 Training model 6...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 73, Validation Accuracy: 0.2759 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 311ms/step Test Accuracy: 17.24% Average Test Precision: 0.038 Average Test Recall: 0.042 Average Test F1-score: 0.040 Training model 7...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 20, Validation Accuracy: 0.2759 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 411ms/step Test Accuracy: 24.14% Average Test Precision: 0.099 Average Test Recall: 0.208 Average Test F1-score: 0.132 Training model 8...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 57, Validation Accuracy: 0.3103 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 367ms/step Test Accuracy: 20.69% Average Test Precision: 0.156 Average Test Recall: 0.144 Average Test F1-score: 0.111 Training model 9...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 44, Validation Accuracy: 0.3103 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 400ms/step Test Accuracy: 24.14% Average Test Precision: 0.039 Average Test Recall: 0.058 Average Test F1-score: 0.047 Training model 10...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 26, Validation Accuracy: 0.1724 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 349ms/step Test Accuracy: 6.90% Average Test Precision: 0.022 Average Test Recall: 0.055 Average Test F1-score: 0.023 Training model 11...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 23, Validation Accuracy: 0.2069 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 366ms/step Test Accuracy: 10.34% Average Test Precision: 0.025 Average Test Recall: 0.025 Average Test F1-score: 0.025 Training model 12...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 33, Validation Accuracy: 0.2759 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 365ms/step Test Accuracy: 24.14% Average Test Precision: 0.135 Average Test Recall: 0.153 Average Test F1-score: 0.135 Training model 13...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 24, Validation Accuracy: 0.1724 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 720ms/step Test Accuracy: 17.24% Average Test Precision: 0.047 Average Test Recall: 0.117 Average Test F1-score: 0.057 Training model 14...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 25, Validation Accuracy: 0.3448 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 394ms/step Test Accuracy: 3.45% Average Test Precision: 0.018 Average Test Recall: 0.045 Average Test F1-score: 0.026 Training model 15...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 41, Validation Accuracy: 0.2414 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 591ms/step Test Accuracy: 17.24% Average Test Precision: 0.042 Average Test Recall: 0.042 Average Test F1-score: 0.042 Training model 16...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 26, Validation Accuracy: 0.2414 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 886ms/step Test Accuracy: 6.90% Average Test Precision: 0.006 Average Test Recall: 0.091 Average Test F1-score: 0.012 Training model 17...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 54, Validation Accuracy: 0.2414 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 890ms/step Test Accuracy: 20.69% Average Test Precision: 0.101 Average Test Recall: 0.097 Average Test F1-score: 0.099 Training model 18...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 49, Validation Accuracy: 0.0690 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 795ms/step Test Accuracy: 3.45% Average Test Precision: 0.003 Average Test Recall: 0.091 Average Test F1-score: 0.006 convolution_width dropout_rate starting_filter_cnt epoch accuracy \ 0 3.0 0.1 16.0 36 0.206897 1 3.0 0.3 16.0 25 0.172414 2 3.0 0.5 16.0 40 0.172414 3 5.0 0.1 16.0 20 0.172414 4 5.0 0.3 16.0 30 0.206897 5 5.0 0.5 16.0 73 0.172414 6 7.0 0.1 16.0 20 0.241379 7 7.0 0.3 16.0 57 0.206897 8 7.0 0.5 16.0 44 0.241379 9 3.0 0.1 32.0 26 0.068966 10 3.0 0.3 32.0 23 0.103448 11 3.0 0.5 32.0 33 0.241379 12 5.0 0.1 32.0 24 0.172414 13 5.0 0.3 32.0 25 0.034483 14 5.0 0.5 32.0 41 0.172414 15 7.0 0.1 32.0 26 0.068966 16 7.0 0.3 32.0 54 0.206897 17 7.0 0.5 32.0 49 0.034483 average_precision average_recall average_f1 0 0.034091 0.054545 0.041958 1 0.037879 0.041667 0.039683 2 0.037879 0.041667 0.039683 3 0.037879 0.041667 0.039683 4 0.027778 0.050000 0.035714 5 0.037879 0.041667 0.039683 6 0.099206 0.208333 0.131944 7 0.155556 0.144444 0.111111 8 0.038889 0.058333 0.046667 9 0.021645 0.054545 0.022727 10 0.025000 0.025000 0.025000 11 0.135417 0.152778 0.135185 12 0.047222 0.116667 0.057143 13 0.018182 0.045455 0.025974 14 0.041667 0.041667 0.041667 15 0.006270 0.090909 0.011730 16 0.101010 0.096970 0.098884 17 0.003135 0.090909 0.006061
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
binary classification model¶
Exploration here will also inform the multiclassification model, but may be easier to explore with more examples per response category. This model will attempt to distinguis between the American Robin (amerob) and the Dark-Eyed Junco (daejun)
In [92]:
# Set up eda data
chosen = np.array(["amerob", "daejun"])
binary_data = np.concatenate([birds[key] for key in chosen], axis=-1)
binary_data_labels = np.concatenate([np.full(birds[key].shape[-1], ind) for (ind, key) in enumerate(chosen)])
def binaryLabelsToBirds(labels):
return chosen[labels]
In [ ]:
# pick a sample for training and validation
n_binary_samples = binary_data.shape[-1]
test_prop = 0.2
train_indices, test_indices = train_test_split(np.arange(0, binary_data.shape[-1]), test_size=test_prop, random_state=112358)
train_data = np.transpose(binary_data[:,:, train_indices], (2, 0, 1)).reshape(-1, 128, 517, 1) # reshape and add single channel for cnn
train_labels = binary_data_labels[train_indices]
train_data, train_labels = shuffle(train_data, train_labels, random_state=112358)
test_data = np.transpose(binary_data[:,:, test_indices], (2, 0, 1)).reshape(-1, 128, 517, 1)
test_labels = binary_data_labels[test_indices]
test_data, test_labels = shuffle(test_data, test_labels, random_state=112358)
print(test_labels.shape)
print(train_labels.shape)
print(test_labels[0])
# create weights for training loss relative to response class imbalance
binary_class_weights = compute_class_weight(class_weight="balanced",
classes=np.unique(binary_data_labels[train_indices], axis=0),
y=binary_data_labels[train_indices])
binary_class_weight_dict = {i: binary_class_weights[i] for i in range(len(binary_class_weights))}
print(binary_class_weight_dict)
(60,)
(237,)
0
{0: np.float64(1.0648148148148149), 1: np.float64(1.369047619047619), 2: np.float64(1.1979166666666667), 3: np.float64(1.1979166666666667), 4: np.float64(0.9583333333333334), 5: np.float64(1.369047619047619), 6: np.float64(0.45634920634920634), 7: np.float64(1.0648148148148149), 8: np.float64(1.1979166666666667), 9: np.float64(0.7986111111111112), 10: np.float64(1.1979166666666667), 11: np.float64(1.1979166666666667)}
In [ ]:
# tuning the 2d convolutional model
conv_shapes = np.array([3, 5, 7])
dropout_rates = np.array([0.1, 0.3, 0.5])
starting_filters = np.array([16, 32])
batch_size = 40 # splits ~240 training samples nicely
tune_grid = np.array(np.meshgrid(conv_shapes, dropout_rates, starting_filters)).T.reshape(-1, 3)
print(tune_grid.shape)
binary_test_performances = []
for tune_index in range(tune_grid.shape[0]):
conv_width = int(round(tune_grid[tune_index, 0]))
dropout_rate = tune_grid[tune_index, 1]
starting_filters = int(round(tune_grid[tune_index, 2]))
print(f"Training model {tune_index + 1}...")
this_model = Sequential(
[
Conv2D(filters=starting_filters, kernel_size=(conv_width, conv_width), padding='same', activation='relu', input_shape=(128,517, 1)),
MaxPooling2D(pool_size=(2,2)),
Conv2D(filters=starting_filters*2, kernel_size=(conv_width, conv_width), padding='same', activation='relu'),
MaxPooling2D(pool_size=(2,2)),
Conv2D(filters=starting_filters*3, kernel_size=(conv_width, conv_width), padding='same', activation='relu'),
MaxPooling2D(pool_size=(2,2)),
Conv2D(filters=starting_filters*4, kernel_size=(conv_width, conv_width), padding='same', activation='relu'),
MaxPooling2D(pool_size=(2,2)),
Flatten(),
Dropout(rate=dropout_rate),
Dense(units=512, activation='relu'),
Dense(units=1, activation='sigmoid')
])
this_model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
original_weights = this_model.get_weights()
this_history = this_model.fit(train_data, train_labels,
epochs=75,
batch_size=batch_size,
validation_data=(test_data, test_labels),
verbose=0,
class_weight=binary_class_weight_dict)
# print and save the loss and accuracy plots, and attempt to pick the best epoch
out_dir = os.path.join(os.getcwd(), 'output', 'eda', 'plots')
plt.plot(this_history.history['loss'])
plt.plot(this_history.history['val_loss'])
plt.title('Loss vs Epoch for a CNN (Binary Classification Tuning)')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['loss', 'val_loss'], loc='upper right')
plt.savefig(os.path.join(out_dir, f'binary_cnn_tune_loss_{tune_index + 1:02d}.png'))
plt.show()
plt.plot(this_history.history['accuracy'])
plt.plot(this_history.history['val_accuracy'])
plt.title('Accuracy vs Epoch for a CNN (Binary Classification Tuning)')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['accuracy', 'val_accuracy'], loc='lower right')
plt.savefig(os.path.join(out_dir, f'binary_cnn_tune_accuracy_{tune_index:02d}.png'))
plt.show()
val_acc = this_history.history['val_accuracy'] # Extract validation accuracy values
# Get the index of the highest accuracy (epochs start at 1)
# however, this is not always effective, and based on some exploration and looking at the plots,
# about 20 epochs does not appear to cause significant overfitting with this data and structure with the maximal parameter configuration.
# Requiring this minimum avoids cases were a random start or initial fluctuations lead to a very low best epoch
# that does not reproduce similar results
# beyond 4 in most of the versions seen so far tends to begin overfitting
# since this is not entirely reliable, the primary way to compare hyperparameter sets is the accuracy plots
best_epoch = min(np.argmax(val_acc[19:]) + 1 + 19, 40)
print(f'Best epoch: {best_epoch}, Validation Accuracy: {val_acc[best_epoch-1]:.4f}')
# retrain and stop at best epoch from before
this_model.set_weights(original_weights)
this_history = this_model.fit(train_data, train_labels,
epochs=best_epoch,
batch_size=40,
validation_data=(test_data, test_labels),
verbose=0,
class_weight=class_weight_dict)
this_pred = (this_model.predict(test_data) > 0.5)
test_accuracy = accuracy_score(test_labels, this_pred)
precision, recall, f1, _ = precision_recall_fscore_support(test_labels, this_pred, average='macro')
print(f"Test Accuracy: {test_accuracy*100:.2f}%")
print(f"Average Test Precision: {precision:.3f}")
print(f"Average Test Recall: {recall:.3f}")
print(f"Average Test F1-score: {f1:.3f}")
binary_test_performances.append({'epoch':best_epoch, 'accuracy':test_accuracy, 'average_precision':precision,
'average_recall':recall, 'average_f1':f1})
# save performances and parameters
binary_params_df = pd.DataFrame(tune_grid, columns=['convolution_width', 'dropout_rate', 'starting_filter_cnt'])
binary_results_df = pd.DataFrame(binary_test_performances)
binary_results_df = pd.concat([binary_params_df, binary_results_df], axis=1)
out_dir = os.path.join(os.getcwd(), 'output', 'eda', 'data')
binary_results_df.to_csv(os.path.join(out_dir, 'tune_results_cnn_binary.csv'), index=False)
print(binary_results_df)
(18, 3) Training model 1...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 32, Validation Accuracy: 0.9000 2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 196ms/step Test Accuracy: 83.33% Average Test Precision: 0.832 Average Test Recall: 0.823 Average Test F1-score: 0.826 Training model 2...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 40, Validation Accuracy: 0.7833 2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 171ms/step Test Accuracy: 81.67% Average Test Precision: 0.812 Average Test Recall: 0.809 Average Test F1-score: 0.810 Training model 3...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 40, Validation Accuracy: 0.7500 2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 187ms/step Test Accuracy: 81.67% Average Test Precision: 0.812 Average Test Recall: 0.820 Average Test F1-score: 0.814 Training model 4...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 37, Validation Accuracy: 0.9333 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 268ms/step Test Accuracy: 85.00% Average Test Precision: 0.850 Average Test Recall: 0.860 Average Test F1-score: 0.849 Training model 5...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 27, Validation Accuracy: 0.9333 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 248ms/step Test Accuracy: 86.67% Average Test Precision: 0.864 Average Test Recall: 0.874 Average Test F1-score: 0.865 Training model 6...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 40, Validation Accuracy: 0.9000 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 239ms/step Test Accuracy: 71.67% Average Test Precision: 0.775 Average Test Recall: 0.751 Average Test F1-score: 0.715 Training model 7...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 40, Validation Accuracy: 0.6167 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 349ms/step Test Accuracy: 95.00% Average Test Precision: 0.947 Average Test Recall: 0.951 Average Test F1-score: 0.949 Training model 8...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 36, Validation Accuracy: 0.9000 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 367ms/step Test Accuracy: 85.00% Average Test Precision: 0.846 Average Test Recall: 0.854 Average Test F1-score: 0.848 Training model 9...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 40, Validation Accuracy: 0.9167 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 307ms/step Test Accuracy: 85.00% Average Test Precision: 0.846 Average Test Recall: 0.854 Average Test F1-score: 0.848 Training model 10...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 35, Validation Accuracy: 0.9000 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 296ms/step Test Accuracy: 80.00% Average Test Precision: 0.823 Average Test Recall: 0.823 Average Test F1-score: 0.800 Training model 11...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 21, Validation Accuracy: 0.8667 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 319ms/step Test Accuracy: 60.00% Average Test Precision: 0.797 Average Test Recall: 0.520 Average Test F1-score: 0.411 Training model 12...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 40, Validation Accuracy: 0.8333 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 283ms/step Test Accuracy: 81.67% Average Test Precision: 0.824 Average Test Recall: 0.831 Average Test F1-score: 0.816 Training model 13...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 20, Validation Accuracy: 0.5833 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 532ms/step Test Accuracy: 41.67% Average Test Precision: 0.208 Average Test Recall: 0.500 Average Test F1-score: 0.294 Training model 14...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 40, Validation Accuracy: 0.7167 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 473ms/step Test Accuracy: 80.00% Average Test Precision: 0.795 Average Test Recall: 0.800 Average Test F1-score: 0.796 Training model 15...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 40, Validation Accuracy: 0.4167 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 502ms/step Test Accuracy: 80.00% Average Test Precision: 0.795 Average Test Recall: 0.800 Average Test F1-score: 0.796 Training model 16...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 40, Validation Accuracy: 0.6833 2/2 ━━━━━━━━━━━━━━━━━━━━ 2s 798ms/step Test Accuracy: 86.67% Average Test Precision: 0.862 Average Test Recall: 0.869 Average Test F1-score: 0.864 Training model 17...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 40, Validation Accuracy: 0.5833 2/2 ━━━━━━━━━━━━━━━━━━━━ 2s 743ms/step Test Accuracy: 83.33% Average Test Precision: 0.831 Average Test Recall: 0.840 Average Test F1-score: 0.832 Training model 18...
c:\Users\ellin\miniconda3\envs\keras_dl\lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Best epoch: 40, Validation Accuracy: 0.8500 2/2 ━━━━━━━━━━━━━━━━━━━━ 2s 742ms/step Test Accuracy: 60.00% Average Test Precision: 0.797 Average Test Recall: 0.520 Average Test F1-score: 0.411 convolution_width dropout_rate starting_filter_cnt epoch accuracy \ 0 3.0 0.1 16.0 32 0.833333 1 3.0 0.3 16.0 40 0.816667 2 3.0 0.5 16.0 40 0.816667 3 5.0 0.1 16.0 37 0.850000 4 5.0 0.3 16.0 27 0.866667 5 5.0 0.5 16.0 40 0.716667 6 7.0 0.1 16.0 40 0.950000 7 7.0 0.3 16.0 36 0.850000 8 7.0 0.5 16.0 40 0.850000 9 3.0 0.1 32.0 35 0.800000 10 3.0 0.3 32.0 21 0.600000 11 3.0 0.5 32.0 40 0.816667 12 5.0 0.1 32.0 20 0.416667 13 5.0 0.3 32.0 40 0.800000 14 5.0 0.5 32.0 40 0.800000 15 7.0 0.1 32.0 40 0.866667 16 7.0 0.3 32.0 40 0.833333 17 7.0 0.5 32.0 40 0.600000 average_precision average_recall average_f1 0 0.831962 0.822857 0.826389 1 0.812500 0.808571 0.810290 2 0.812500 0.820000 0.814137 3 0.850000 0.860000 0.848951 4 0.864294 0.874286 0.865320 5 0.775000 0.751429 0.714685 6 0.946833 0.951429 0.948849 7 0.845982 0.854286 0.847930 8 0.845982 0.854286 0.847930 9 0.822857 0.822857 0.800000 10 0.796610 0.520000 0.410802 11 0.823661 0.831429 0.816207 12 0.208333 0.500000 0.294118 13 0.794613 0.800000 0.796380 14 0.794613 0.800000 0.796380 15 0.861953 0.868571 0.864253 16 0.830923 0.840000 0.831650 17 0.796610 0.520000 0.410802